2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
32 #include "wl12xx_80211.h"
39 #include "vendor_cmd.h"
44 #define WL1271_BOOT_RETRIES 3
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
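/*
 * Notify the FW that association completed: once the STA is associated, move
 * the peer on sta.hlid to the connected state so data traffic can flow. Only
 * sent once per association (guarded by WLVIF_FLAG_STA_STATE_SENT).
 */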
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
84 /* copy the current dfs region */
86 wl->dfs_region = request->dfs_region;
88 wlcore_regdomain_config(wl);
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
110 * this function is called when the rx_streaming interval
111 * has been changed or rx_streaming should be disabled
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
116 int period = wl->conf.rx_streaming.interval;
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
122 /* reconfigure/disable according to new streaming_period */
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer);
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
144 mutex_lock(&wl->mutex);
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
152 if (!wl->conf.rx_streaming.interval)
155 ret = wl1271_ps_elp_wakeup(wl);
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
168 wl1271_ps_elp_sleep(wl);
170 mutex_unlock(&wl->mutex);
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
180 mutex_lock(&wl->mutex);
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
185 ret = wl1271_ps_elp_wakeup(wl);
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
194 wl1271_ps_elp_sleep(wl);
196 mutex_unlock(&wl->mutex);
199 static void wl1271_rx_streaming_timer(unsigned long data)
201 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
218 static void wlcore_rc_update_work(struct work_struct *work)
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
241 wlcore_hw_sta_rc_update(wl, wlvif);
245 wl1271_ps_elp_sleep(wl);
247 mutex_unlock(&wl->mutex);
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
252 struct delayed_work *dwork;
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
258 mutex_lock(&wl->mutex);
260 if (unlikely(wl->state != WLCORE_STATE_ON))
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
279 * if a scan is in progress, we might not have any Tx for a long
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292 * Tx is genuinely stuck we will hopefully discover it when all
293 * stations are removed due to inactivity.
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
309 mutex_unlock(&wl->mutex);
312 static void wlcore_adjust_conf(struct wl1271 *wl)
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
346 * Wake up from high level PS if the STA is asleep with too few
347 * packets in FW or if the STA is awake.
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
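 * For example, one AP with a single connected STA yields 2*1 + 1 = 3 active
 * links: the AP's global and broadcast links plus the STA link.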
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
370 unsigned long cur_fw_ps_map;
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
380 wl->ap_fw_ps_map = cur_fw_ps_map;
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
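/*
 * Read the FW status block and fold it into the driver state: per-queue and
 * per-link freed-packet counters, the number of available TX blocks (and the
 * TX watchdog), per-link PS state for AP roles, the host-FW time offset and
 * the fast-link bitmap.
 */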
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
390 struct wl12xx_vif *wlvif;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
396 struct wl1271_link *lnk;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
400 wl->fw_status_len, false);
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
449 wl->tx_blocks_freed = status->total_released_blks;
451 wl->tx_allocated_blocks -= freed_blocks;
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
462 cancel_delayed_work(&wl->tx_watchdog_work);
465 avail = status->tx_total - wl->tx_allocated_blocks;
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
482 /* for AP roles, update allocated TX blocks per link and the PS status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
487 /* update the host-chipset time offset */
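/*
 * The >> 10 is a cheap approximate ns-to-us conversion (divide by 1024),
 * presumably to match the units of status->fw_localtime.
 */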
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
510 static void wl1271_netstack_work(struct work_struct *work)
513 container_of(work, struct wl1271, netstack_work);
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
520 #define WL1271_IRQ_MAX_LOOPS 256
522 static int wlcore_irq_locked(struct wl1271 *wl)
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
528 unsigned int defer_count;
532 * If an edge-triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
540 if (unlikely(wl->state != WLCORE_STATE_ON))
543 ret = wl1271_ps_elp_wakeup(wl);
547 while (!done && loopcount--) {
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
556 ret = wlcore_fw_status(wl, wl->fw_status);
560 wlcore_hw_tx_immediate_compl(wl);
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
574 /* restarting the chip. ignore any other interrupt. */
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
584 /* restarting the chip. ignore any other interrupt. */
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
591 ret = wlcore_rx(wl, wl->fw_status);
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
604 ret = wlcore_tx_work_locked(wl);
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
645 wl1271_ps_elp_sleep(wl);
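/*
 * Threaded IRQ handler: complete any pending ELP wakeup, defer the work via
 * WL1271_FLAG_PENDING_WORK while suspended, otherwise handle the interrupt
 * under wl->mutex through wlcore_irq_locked().
 */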
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
655 struct wl1271 *wl = cookie;
657 /* complete the pending ELP completion, if any */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue work right now. mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
680 mutex_lock(&wl->mutex);
682 ret = wlcore_irq_locked(wl);
684 wl12xx_queue_recovery_work(wl);
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
694 mutex_unlock(&wl->mutex);
699 struct vif_counter_data {
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
709 struct vif_counter_data *counter = data;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
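/*
 * Select which firmware image to load: the PLT firmware when plt is set,
 * otherwise multi-role vs. single-role based on the cached last_vif_count,
 * and copy it into a vmalloc'ed buffer via request_firmware().
 */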
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
730 const struct firmware *fw;
732 enum wl12xx_fw_type fw_type;
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
752 if (wl->fw_type == fw_type)
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
757 ret = request_firmware(&fw, fw_name, wl->dev);
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
777 wl1271_error("could not allocate memory for the firmware");
782 memcpy(wl->fw, fw->data, wl->fw_len);
784 wl->fw_type = fw_type;
786 release_firmware(fw);
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
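/*
 * Append a chunk of the FW log to the PAGE_SIZE buffer exposed through the
 * sysfs fwlog entry; returns the number of bytes actually copied.
 */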
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
827 wl1271_info("Reading FW panic log");
830 * Make sure the chip is awake and the logger isn't active.
831 * Do not send a stop fwlog command if the fw is hung or if
832 * dbgpins are used (due to some fw bug).
834 if (wl1271_ps_elp_wakeup(wl))
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
840 /* Traverse the memory blocks linked list */
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
845 end_of_log = wlcore_event_fw_logger(wl);
847 } while (end_of_log != 0);
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet got in the FW status
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
882 sta = ieee80211_find_sta(vif, addr);
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
888 static void wlcore_print_recovery(struct wl1271 *wl)
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
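/*
 * Recovery worker: dump the FW panic log and PC, save per-station sequence
 * numbers, remove all interfaces, stop the hardware and ask mac80211 to
 * restart it.
 */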
917 static void wl1271_recovery_work(struct work_struct *work)
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
924 mutex_lock(&wl->mutex);
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
958 __wl1271_op_remove_interface(wl, vif, false);
961 wlcore_op_stop_locked(wl);
963 ieee80211_restart_hw(wl->hw);
966 * It's safe to enable TX now - the queues are stopped after a request
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
982 static int wl1271_setup(struct wl1271 *wl)
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1028 wl1271_power_off(wl);
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1036 ret = wl12xx_set_power_on(wl);
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1054 /* TODO: make sure the lower driver has set things up correctly */
1056 ret = wl1271_setup(wl);
1060 ret = wl12xx_fetch_firmware(wl, plt);
1068 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1070 int retries = WL1271_BOOT_RETRIES;
1071 struct wiphy *wiphy = wl->hw->wiphy;
1073 static const char* const PLT_MODE[] = {
1082 mutex_lock(&wl->mutex);
1084 wl1271_notice("power up");
1086 if (wl->state != WLCORE_STATE_OFF) {
1087 wl1271_error("cannot go into PLT state because not "
1088 "in off state: %d", wl->state);
1093 /* Indicate to lower levels that we are now in PLT mode */
1095 wl->plt_mode = plt_mode;
1099 ret = wl12xx_chip_wakeup(wl, true);
1103 if (plt_mode != PLT_CHIP_AWAKE) {
1104 ret = wl->ops->plt_init(wl);
1109 wl->state = WLCORE_STATE_ON;
1110 wl1271_notice("firmware booted in PLT mode %s (%s)",
1112 wl->chip.fw_ver_str);
1114 /* update hw/fw version info in wiphy struct */
1115 wiphy->hw_version = wl->chip.id;
1116 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1117 sizeof(wiphy->fw_version));
1122 wl1271_power_off(wl);
1126 wl->plt_mode = PLT_OFF;
1128 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1129 WL1271_BOOT_RETRIES);
1131 mutex_unlock(&wl->mutex);
1136 int wl1271_plt_stop(struct wl1271 *wl)
1140 wl1271_notice("power down");
1143 * Interrupts must be disabled before setting the state to OFF.
1144 * Otherwise, the interrupt handler might be called and exit without
1145 * reading the interrupt status.
1147 wlcore_disable_interrupts(wl);
1148 mutex_lock(&wl->mutex);
1150 mutex_unlock(&wl->mutex);
1153 * This will not necessarily enable interrupts as interrupts
1154 * may have been disabled when op_stop was called. It will,
1155 * however, balance the above call to disable_interrupts().
1157 wlcore_enable_interrupts(wl);
1159 wl1271_error("cannot power down because not in PLT "
1160 "state: %d", wl->state);
1165 mutex_unlock(&wl->mutex);
1167 wl1271_flush_deferred_work(wl);
1168 cancel_work_sync(&wl->netstack_work);
1169 cancel_work_sync(&wl->recovery_work);
1170 cancel_delayed_work_sync(&wl->elp_work);
1171 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1173 mutex_lock(&wl->mutex);
1174 wl1271_power_off(wl);
1176 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1177 wl->state = WLCORE_STATE_OFF;
1179 wl->plt_mode = PLT_OFF;
1181 mutex_unlock(&wl->mutex);
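/*
 * mac80211 TX entry point: map the skb to a hw queue and link (hlid), drop it
 * if the link is invalid or the queue is hard-stopped, apply the per-vif high
 * watermark, and kick tx_work if the FW is not already busy.
 */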
1187 static void wl1271_op_tx(struct ieee80211_hw *hw,
1188 struct ieee80211_tx_control *control,
1189 struct sk_buff *skb)
1191 struct wl1271 *wl = hw->priv;
1192 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193 struct ieee80211_vif *vif = info->control.vif;
1194 struct wl12xx_vif *wlvif = NULL;
1195 unsigned long flags;
1200 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1201 ieee80211_free_txskb(hw, skb);
1205 wlvif = wl12xx_vif_to_data(vif);
1206 mapping = skb_get_queue_mapping(skb);
1207 q = wl1271_tx_get_queue(mapping);
1209 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1211 spin_lock_irqsave(&wl->wl_lock, flags);
1214 * drop the packet if the link is invalid or the queue is stopped
1215 * for any reason but watermark. Watermark is a "soft"-stop so we
1216 * allow these packets through.
1218 if (hlid == WL12XX_INVALID_LINK_ID ||
1219 (!test_bit(hlid, wlvif->links_map)) ||
1220 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1223 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1224 ieee80211_free_txskb(hw, skb);
1228 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1230 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1232 wl->tx_queue_count[q]++;
1233 wlvif->tx_queue_count[q]++;
1236 * The workqueue is slow to process the tx_queue and we need to stop
1237 * the queue here, otherwise the queue will get too long.
1239 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1240 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1241 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1242 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1243 wlcore_stop_queue_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1248 * The chip specific setup must run before the first TX packet -
1249 * before that, the tx_work will not be initialized!
1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1253 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1254 ieee80211_queue_work(wl->hw, &wl->tx_work);
1257 spin_unlock_irqrestore(&wl->wl_lock, flags);
1260 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1262 unsigned long flags;
1265 /* no need to queue a new dummy packet if one is already pending */
1266 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1269 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1271 spin_lock_irqsave(&wl->wl_lock, flags);
1272 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1273 wl->tx_queue_count[q]++;
1274 spin_unlock_irqrestore(&wl->wl_lock, flags);
1276 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1277 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1278 return wlcore_tx_work_locked(wl);
1281 * If the FW TX is busy, TX work will be scheduled by the threaded
1282 * interrupt handler function
1288 * The size of the dummy packet should be at least 1400 bytes. However, in
1289 * order to minimize the number of bus transactions, aligning it to 512-byte
1290 * boundaries could be beneficial, performance-wise.
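 * With these constraints, TOTAL_TX_DUMMY_PACKET_SIZE below works out to
 * ALIGN(1400, 512) = 1536 bytes.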
1292 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1294 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1296 struct sk_buff *skb;
1297 struct ieee80211_hdr_3addr *hdr;
1298 unsigned int dummy_packet_size;
1300 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1301 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1303 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1305 wl1271_warning("Failed to allocate a dummy packet skb");
1309 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1311 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1312 memset(hdr, 0, sizeof(*hdr));
1313 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1314 IEEE80211_STYPE_NULLFUNC |
1315 IEEE80211_FCTL_TODS);
1317 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1319 /* Dummy packets require the TID to be management */
1320 skb->priority = WL1271_TID_MGMT;
1322 /* Initialize all fields that might be used */
1323 skb_set_queue_mapping(skb, 0);
1324 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1332 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1334 int num_fields = 0, in_field = 0, fields_size = 0;
1335 int i, pattern_len = 0;
1338 wl1271_warning("No mask in WoWLAN pattern");
1343 * The pattern is broken up into segments of bytes at different offsets
1344 * that need to be checked by the FW filter. Each segment is called
1345 * a field in the FW API. We verify that the total number of fields
1346 * required for this pattern won't exceed FW limits (8)
1347 * as well as the total fields buffer won't exceed the FW limit.
1348 * Note that if there's a pattern which crosses Ethernet/IP header
1349 * boundary a new field is required.
1351 for (i = 0; i < p->pattern_len; i++) {
1352 if (test_bit(i, (unsigned long *)p->mask)) {
1357 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1359 fields_size += pattern_len +
1360 RX_FILTER_FIELD_OVERHEAD;
1368 fields_size += pattern_len +
1369 RX_FILTER_FIELD_OVERHEAD;
1376 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1380 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1381 wl1271_warning("RX Filter too complex. Too many segments");
1385 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1386 wl1271_warning("RX filter pattern is too big");
1393 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1395 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1398 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1405 for (i = 0; i < filter->num_fields; i++)
1406 kfree(filter->fields[i].pattern);
1411 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1412 u16 offset, u8 flags,
1413 const u8 *pattern, u8 len)
1415 struct wl12xx_rx_filter_field *field;
1417 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1418 wl1271_warning("Max fields per RX filter. can't alloc another");
1422 field = &filter->fields[filter->num_fields];
1424 field->pattern = kzalloc(len, GFP_KERNEL);
1425 if (!field->pattern) {
1426 wl1271_warning("Failed to allocate RX filter pattern");
1430 filter->num_fields++;
1432 field->offset = cpu_to_le16(offset);
1433 field->flags = flags;
1435 memcpy(field->pattern, pattern, len);
1440 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1442 int i, fields_size = 0;
1444 for (i = 0; i < filter->num_fields; i++)
1445 fields_size += filter->fields[i].len +
1446 sizeof(struct wl12xx_rx_filter_field) -
1452 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1456 struct wl12xx_rx_filter_field *field;
1458 for (i = 0; i < filter->num_fields; i++) {
1459 field = (struct wl12xx_rx_filter_field *)buf;
1461 field->offset = filter->fields[i].offset;
1462 field->flags = filter->fields[i].flags;
1463 field->len = filter->fields[i].len;
1465 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1466 buf += sizeof(struct wl12xx_rx_filter_field) -
1467 sizeof(u8 *) + field->len;
1472 * Allocate an RX filter and return it through f; the caller must free it
1473 * using wl1271_rx_filter_free()
1476 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1477 struct wl12xx_rx_filter **f)
1480 struct wl12xx_rx_filter *filter;
1484 filter = wl1271_rx_filter_alloc();
1486 wl1271_warning("Failed to alloc rx filter");
1492 while (i < p->pattern_len) {
1493 if (!test_bit(i, (unsigned long *)p->mask)) {
1498 for (j = i; j < p->pattern_len; j++) {
1499 if (!test_bit(j, (unsigned long *)p->mask))
1502 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1503 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1507 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1509 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1511 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1512 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1517 ret = wl1271_rx_filter_alloc_field(filter,
1520 &p->pattern[i], len);
1527 filter->action = FILTER_SIGNAL;
1533 wl1271_rx_filter_free(filter);
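/*
 * Program the FW RX filters from the WoWLAN configuration: with no patterns,
 * keep the default FILTER_SIGNAL behaviour and clear all filters; otherwise
 * validate every pattern, convert each one into an RX filter, enable it, and
 * set the default action to FILTER_DROP so only matching frames wake the host.
 */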
1539 static int wl1271_configure_wowlan(struct wl1271 *wl,
1540 struct cfg80211_wowlan *wow)
1544 if (!wow || wow->any || !wow->n_patterns) {
1545 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1550 ret = wl1271_rx_filter_clear_all(wl);
1557 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1560 /* Validate all incoming patterns before clearing current FW state */
1561 for (i = 0; i < wow->n_patterns; i++) {
1562 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1564 wl1271_warning("Bad wowlan pattern %d", i);
1569 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1573 ret = wl1271_rx_filter_clear_all(wl);
1577 /* Translate WoWLAN patterns into filters */
1578 for (i = 0; i < wow->n_patterns; i++) {
1579 struct cfg80211_pkt_pattern *p;
1580 struct wl12xx_rx_filter *filter = NULL;
1582 p = &wow->patterns[i];
1584 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1586 wl1271_warning("Failed to create an RX filter from "
1587 "wowlan pattern %d", i);
1591 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1593 wl1271_rx_filter_free(filter);
1598 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1604 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1605 struct wl12xx_vif *wlvif,
1606 struct cfg80211_wowlan *wow)
1610 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1613 ret = wl1271_configure_wowlan(wl, wow);
1617 if ((wl->conf.conn.suspend_wake_up_event ==
1618 wl->conf.conn.wake_up_event) &&
1619 (wl->conf.conn.suspend_listen_interval ==
1620 wl->conf.conn.listen_interval))
1623 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1624 wl->conf.conn.suspend_wake_up_event,
1625 wl->conf.conn.suspend_listen_interval);
1628 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1634 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1635 struct wl12xx_vif *wlvif,
1636 struct cfg80211_wowlan *wow)
1640 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1643 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1647 ret = wl1271_configure_wowlan(wl, wow);
1656 static int wl1271_configure_suspend(struct wl1271 *wl,
1657 struct wl12xx_vif *wlvif,
1658 struct cfg80211_wowlan *wow)
1660 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1661 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1662 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1663 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1667 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1670 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1671 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1673 if ((!is_ap) && (!is_sta))
1676 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1677 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1680 wl1271_configure_wowlan(wl, NULL);
1683 if ((wl->conf.conn.suspend_wake_up_event ==
1684 wl->conf.conn.wake_up_event) &&
1685 (wl->conf.conn.suspend_listen_interval ==
1686 wl->conf.conn.listen_interval))
1689 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1690 wl->conf.conn.wake_up_event,
1691 wl->conf.conn.listen_interval);
1694 wl1271_error("resume: wake up conditions failed: %d",
1698 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1702 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1703 struct cfg80211_wowlan *wow)
1705 struct wl1271 *wl = hw->priv;
1706 struct wl12xx_vif *wlvif;
1709 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1712 /* we want to perform the recovery before suspending */
1713 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1714 wl1271_warning("postponing suspend to perform recovery");
1718 wl1271_tx_flush(wl);
1720 mutex_lock(&wl->mutex);
1722 ret = wl1271_ps_elp_wakeup(wl);
1724 mutex_unlock(&wl->mutex);
1728 wl->wow_enabled = true;
1729 wl12xx_for_each_wlvif(wl, wlvif) {
1730 if (wlcore_is_p2p_mgmt(wlvif))
1733 ret = wl1271_configure_suspend(wl, wlvif, wow);
1735 mutex_unlock(&wl->mutex);
1736 wl1271_warning("couldn't prepare device to suspend");
1741 /* disable fast link flow control notifications from FW */
1742 ret = wlcore_hw_interrupt_notify(wl, false);
1746 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1747 ret = wlcore_hw_rx_ba_filter(wl,
1748 !!wl->conf.conn.suspend_rx_ba_activity);
1753 wl1271_ps_elp_sleep(wl);
1754 mutex_unlock(&wl->mutex);
1757 wl1271_warning("couldn't prepare device to suspend");
1761 /* flush any remaining work */
1762 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1765 * disable and re-enable interrupts in order to flush
1768 wlcore_disable_interrupts(wl);
1771 * set suspended flag to avoid triggering a new threaded_irq
1772 * work. no need for spinlock as interrupts are disabled.
1774 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1776 wlcore_enable_interrupts(wl);
1777 flush_work(&wl->tx_work);
1778 flush_delayed_work(&wl->elp_work);
1781 * Cancel the watchdog even if the above tx_flush failed. We will detect
1782 * it on resume anyway.
1784 cancel_delayed_work(&wl->tx_watchdog_work);
1789 static int wl1271_op_resume(struct ieee80211_hw *hw)
1791 struct wl1271 *wl = hw->priv;
1792 struct wl12xx_vif *wlvif;
1793 unsigned long flags;
1794 bool run_irq_work = false, pending_recovery;
1797 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1799 WARN_ON(!wl->wow_enabled);
1802 * re-enable irq_work enqueuing, and call irq_work directly if
1803 * there is pending work.
1805 spin_lock_irqsave(&wl->wl_lock, flags);
1806 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1808 run_irq_work = true;
1809 spin_unlock_irqrestore(&wl->wl_lock, flags);
1811 mutex_lock(&wl->mutex);
1813 /* test the recovery flag before calling any SDIO functions */
1814 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1818 wl1271_debug(DEBUG_MAC80211,
1819 "run postponed irq_work directly");
1821 /* don't talk to the HW if recovery is pending */
1822 if (!pending_recovery) {
1823 ret = wlcore_irq_locked(wl);
1825 wl12xx_queue_recovery_work(wl);
1828 wlcore_enable_interrupts(wl);
1831 if (pending_recovery) {
1832 wl1271_warning("queuing forgotten recovery on resume");
1833 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1837 ret = wl1271_ps_elp_wakeup(wl);
1841 wl12xx_for_each_wlvif(wl, wlvif) {
1842 if (wlcore_is_p2p_mgmt(wlvif))
1845 wl1271_configure_resume(wl, wlvif);
1848 ret = wlcore_hw_interrupt_notify(wl, true);
1852 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1853 ret = wlcore_hw_rx_ba_filter(wl, false);
1858 wl1271_ps_elp_sleep(wl);
1861 wl->wow_enabled = false;
1864 * Set a flag to re-init the watchdog on the first Tx after resume.
1865 * That way we avoid possible conditions where Tx-complete interrupts
1866 * fail to arrive and we perform a spurious recovery.
1868 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1869 mutex_unlock(&wl->mutex);
1875 static int wl1271_op_start(struct ieee80211_hw *hw)
1877 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1880 * We have to delay the booting of the hardware because
1881 * we need to know the local MAC address before downloading and
1882 * initializing the firmware. The MAC address cannot be changed
1883 * after boot, and without the proper MAC address, the firmware
1884 * will not function properly.
1886 * The MAC address is first known when the corresponding interface
1887 * is added. That is where we will initialize the hardware.
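/*
 * Full shutdown with wl->mutex held: mark the state OFF, disable interrupts,
 * flush and cancel the pending works, power the chip off and reset the
 * driver's bookkeeping to its defaults.
 */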
1893 static void wlcore_op_stop_locked(struct wl1271 *wl)
1897 if (wl->state == WLCORE_STATE_OFF) {
1898 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1900 wlcore_enable_interrupts(wl);
1906 * this must be before the cancel_work calls below, so that the work
1907 * functions don't perform further work.
1909 wl->state = WLCORE_STATE_OFF;
1912 * Use the nosync variant to disable interrupts, so the mutex could be
1913 * held while doing so without deadlocking.
1915 wlcore_disable_interrupts_nosync(wl);
1917 mutex_unlock(&wl->mutex);
1919 wlcore_synchronize_interrupts(wl);
1920 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1921 cancel_work_sync(&wl->recovery_work);
1922 wl1271_flush_deferred_work(wl);
1923 cancel_delayed_work_sync(&wl->scan_complete_work);
1924 cancel_work_sync(&wl->netstack_work);
1925 cancel_work_sync(&wl->tx_work);
1926 cancel_delayed_work_sync(&wl->elp_work);
1927 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1929 /* let's notify MAC80211 about the remaining pending TX frames */
1930 mutex_lock(&wl->mutex);
1931 wl12xx_tx_reset(wl);
1933 wl1271_power_off(wl);
1935 * In case a recovery was scheduled, interrupts were disabled to avoid
1936 * an interrupt storm. Now that the power is down, it is safe to
1937 * re-enable interrupts to balance the disable depth
1939 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1940 wlcore_enable_interrupts(wl);
1942 wl->band = NL80211_BAND_2GHZ;
1945 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1946 wl->channel_type = NL80211_CHAN_NO_HT;
1947 wl->tx_blocks_available = 0;
1948 wl->tx_allocated_blocks = 0;
1949 wl->tx_results_count = 0;
1950 wl->tx_packets_count = 0;
1951 wl->time_offset = 0;
1952 wl->ap_fw_ps_map = 0;
1954 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1955 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1956 memset(wl->links_map, 0, sizeof(wl->links_map));
1957 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1958 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1959 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1960 wl->active_sta_count = 0;
1961 wl->active_link_count = 0;
1963 /* The system link is always allocated */
1964 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1965 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1966 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1969 * this is performed after the cancel_work calls and the associated
1970 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1971 * get executed before all these vars have been reset.
1975 wl->tx_blocks_freed = 0;
1977 for (i = 0; i < NUM_TX_QUEUES; i++) {
1978 wl->tx_pkts_freed[i] = 0;
1979 wl->tx_allocated_pkts[i] = 0;
1982 wl1271_debugfs_reset(wl);
1984 kfree(wl->raw_fw_status);
1985 wl->raw_fw_status = NULL;
1986 kfree(wl->fw_status);
1987 wl->fw_status = NULL;
1988 kfree(wl->tx_res_if);
1989 wl->tx_res_if = NULL;
1990 kfree(wl->target_mem_map);
1991 wl->target_mem_map = NULL;
1994 * FW channels must be re-calibrated after recovery;
1995 * save the current Reg-Domain channel configuration and clear it.
1997 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1998 sizeof(wl->reg_ch_conf_pending));
1999 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2002 static void wlcore_op_stop(struct ieee80211_hw *hw)
2004 struct wl1271 *wl = hw->priv;
2006 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2008 mutex_lock(&wl->mutex);
2010 wlcore_op_stop_locked(wl);
2012 mutex_unlock(&wl->mutex);
2015 static void wlcore_channel_switch_work(struct work_struct *work)
2017 struct delayed_work *dwork;
2019 struct ieee80211_vif *vif;
2020 struct wl12xx_vif *wlvif;
2023 dwork = to_delayed_work(work);
2024 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2027 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2029 mutex_lock(&wl->mutex);
2031 if (unlikely(wl->state != WLCORE_STATE_ON))
2034 /* check the channel switch is still ongoing */
2035 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2038 vif = wl12xx_wlvif_to_vif(wlvif);
2039 ieee80211_chswitch_done(vif, false);
2041 ret = wl1271_ps_elp_wakeup(wl);
2045 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2047 wl1271_ps_elp_sleep(wl);
2049 mutex_unlock(&wl->mutex);
2052 static void wlcore_connection_loss_work(struct work_struct *work)
2054 struct delayed_work *dwork;
2056 struct ieee80211_vif *vif;
2057 struct wl12xx_vif *wlvif;
2059 dwork = to_delayed_work(work);
2060 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2063 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2065 mutex_lock(&wl->mutex);
2067 if (unlikely(wl->state != WLCORE_STATE_ON))
2070 /* Call mac80211 connection loss */
2071 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2074 vif = wl12xx_wlvif_to_vif(wlvif);
2075 ieee80211_connection_loss(vif);
2077 mutex_unlock(&wl->mutex);
2080 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2082 struct delayed_work *dwork;
2084 struct wl12xx_vif *wlvif;
2085 unsigned long time_spare;
2088 dwork = to_delayed_work(work);
2089 wlvif = container_of(dwork, struct wl12xx_vif,
2090 pending_auth_complete_work);
2093 mutex_lock(&wl->mutex);
2095 if (unlikely(wl->state != WLCORE_STATE_ON))
2099 * Make sure a second really passed since the last auth reply. Maybe
2100 * a second auth reply arrived while we were stuck on the mutex.
2101 * Check for a little less than the timeout to protect from scheduler
2104 time_spare = jiffies +
2105 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2106 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2109 ret = wl1271_ps_elp_wakeup(wl);
2113 /* cancel the ROC if active */
2114 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2116 wl1271_ps_elp_sleep(wl);
2118 mutex_unlock(&wl->mutex);
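/* Grab a free entry from the rate-policy bitmap, if one is available. */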
2121 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2123 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2124 WL12XX_MAX_RATE_POLICIES);
2125 if (policy >= WL12XX_MAX_RATE_POLICIES)
2128 __set_bit(policy, wl->rate_policies_map);
2133 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2135 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2138 __clear_bit(*idx, wl->rate_policies_map);
2139 *idx = WL12XX_MAX_RATE_POLICIES;
2142 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2144 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2145 WLCORE_MAX_KLV_TEMPLATES);
2146 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2149 __set_bit(policy, wl->klv_templates_map);
2154 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2156 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2159 __clear_bit(*idx, wl->klv_templates_map);
2160 *idx = WLCORE_MAX_KLV_TEMPLATES;
2163 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2165 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2167 switch (wlvif->bss_type) {
2168 case BSS_TYPE_AP_BSS:
2170 return WL1271_ROLE_P2P_GO;
2171 else if (ieee80211_vif_is_mesh(vif))
2172 return WL1271_ROLE_MESH_POINT;
2174 return WL1271_ROLE_AP;
2176 case BSS_TYPE_STA_BSS:
2178 return WL1271_ROLE_P2P_CL;
2180 return WL1271_ROLE_STA;
2183 return WL1271_ROLE_IBSS;
2186 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2188 return WL12XX_INVALID_ROLE_TYPE;
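/*
 * Reset the per-vif state when an interface is (re)added: derive the BSS type
 * from the mac80211 interface type, allocate rate policies and the keep-alive
 * template, copy the global band/channel/power settings from wl and set up
 * the per-vif works and the rx_streaming timer.
 */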
2191 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2193 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2196 /* clear everything but the persistent data */
2197 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2199 switch (ieee80211_vif_type_p2p(vif)) {
2200 case NL80211_IFTYPE_P2P_CLIENT:
2203 case NL80211_IFTYPE_STATION:
2204 case NL80211_IFTYPE_P2P_DEVICE:
2205 wlvif->bss_type = BSS_TYPE_STA_BSS;
2207 case NL80211_IFTYPE_ADHOC:
2208 wlvif->bss_type = BSS_TYPE_IBSS;
2210 case NL80211_IFTYPE_P2P_GO:
2213 case NL80211_IFTYPE_AP:
2214 case NL80211_IFTYPE_MESH_POINT:
2215 wlvif->bss_type = BSS_TYPE_AP_BSS;
2218 wlvif->bss_type = MAX_BSS_TYPE;
2222 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2223 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2224 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2226 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2227 wlvif->bss_type == BSS_TYPE_IBSS) {
2228 /* init sta/ibss data */
2229 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2230 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2231 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2232 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2233 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2234 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2235 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2236 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2239 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2240 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2241 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2242 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2243 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2244 wl12xx_allocate_rate_policy(wl,
2245 &wlvif->ap.ucast_rate_idx[i]);
2246 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2248 * TODO: check if basic_rate shouldn't be
2249 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2250 * instead (the same thing for STA above).
2252 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2253 /* TODO: this seems to be used only for STA, check it */
2254 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2257 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2258 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2259 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2262 * mac80211 configures some values globally, while we treat them
2263 * per-interface. Thus, on init, we have to copy them from wl
2265 wlvif->band = wl->band;
2266 wlvif->channel = wl->channel;
2267 wlvif->power_level = wl->power_level;
2268 wlvif->channel_type = wl->channel_type;
2270 INIT_WORK(&wlvif->rx_streaming_enable_work,
2271 wl1271_rx_streaming_enable_work);
2272 INIT_WORK(&wlvif->rx_streaming_disable_work,
2273 wl1271_rx_streaming_disable_work);
2274 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2275 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2276 wlcore_channel_switch_work);
2277 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2278 wlcore_connection_loss_work);
2279 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2280 wlcore_pending_auth_complete_work);
2281 INIT_LIST_HEAD(&wlvif->list);
2283 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2284 (unsigned long) wlvif);
2288 static int wl12xx_init_fw(struct wl1271 *wl)
2290 int retries = WL1271_BOOT_RETRIES;
2291 bool booted = false;
2292 struct wiphy *wiphy = wl->hw->wiphy;
2297 ret = wl12xx_chip_wakeup(wl, false);
2301 ret = wl->ops->boot(wl);
2305 ret = wl1271_hw_init(wl);
2313 mutex_unlock(&wl->mutex);
2314 /* Unlocking the mutex in the middle of handling is
2315 * inherently unsafe. In this case we deem it safe to do,
2316 * because we need to let any possibly pending IRQ out of
2317 * the system (and while we are WLCORE_STATE_OFF the IRQ
2318 * work function will not do anything.) Also, any other
2319 * possible concurrent operations will fail due to the
2320 * current state, hence the wl1271 struct should be safe. */
2321 wlcore_disable_interrupts(wl);
2322 wl1271_flush_deferred_work(wl);
2323 cancel_work_sync(&wl->netstack_work);
2324 mutex_lock(&wl->mutex);
2326 wl1271_power_off(wl);
2330 wl1271_error("firmware boot failed despite %d retries",
2331 WL1271_BOOT_RETRIES);
2335 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2337 /* update hw/fw version info in wiphy struct */
2338 wiphy->hw_version = wl->chip.id;
2339 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2340 sizeof(wiphy->fw_version));
2343 * Now we know if 11a is supported (info from the NVS), so disable
2344 * 11a channels if not supported
2346 if (!wl->enable_11a)
2347 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2349 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2350 wl->enable_11a ? "" : "not ");
2352 wl->state = WLCORE_STATE_ON;
2357 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2359 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2363 * Check whether a fw switch (i.e. moving from one loaded
2364 * fw to another) is needed. This function is also responsible
2365 * for updating wl->last_vif_count, so it must be called before
2366 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2369 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2370 struct vif_counter_data vif_counter_data,
2373 enum wl12xx_fw_type current_fw = wl->fw_type;
2374 u8 vif_count = vif_counter_data.counter;
2376 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2379 /* increase the vif count if this is a new vif */
2380 if (add && !vif_counter_data.cur_vif_running)
2383 wl->last_vif_count = vif_count;
2385 /* no need for fw change if the device is OFF */
2386 if (wl->state == WLCORE_STATE_OFF)
2389 /* no need for fw change if a single fw is used */
2390 if (!wl->mr_fw_name)
2393 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2395 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2402 * Enter "forced psm". Make sure the sta is in psm against the ap,
2403 * to make the fw switch a bit more disconnection-persistent.
2405 static void wl12xx_force_active_psm(struct wl1271 *wl)
2407 struct wl12xx_vif *wlvif;
2409 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2410 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2414 struct wlcore_hw_queue_iter_data {
2415 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2417 struct ieee80211_vif *vif;
2418 /* is the current vif among those iterated */
2422 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2423 struct ieee80211_vif *vif)
2425 struct wlcore_hw_queue_iter_data *iter_data = data;
2427 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2428 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2431 if (iter_data->cur_running || vif == iter_data->vif) {
2432 iter_data->cur_running = true;
2436 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
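/*
 * Assign the vif a block of NUM_TX_QUEUES mac80211 hw queues: reuse the
 * pre-allocated base when the vif is already running (resume/recovery),
 * otherwise take the first free block; the trailing indices are reserved
 * for the per-AP CAB queues.
 */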
2439 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2440 struct wl12xx_vif *wlvif)
2442 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2443 struct wlcore_hw_queue_iter_data iter_data = {};
2446 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2447 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2451 iter_data.vif = vif;
2453 /* mark all bits taken by active interfaces */
2454 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2455 IEEE80211_IFACE_ITER_RESUME_ALL,
2456 wlcore_hw_queue_iter, &iter_data);
2458 /* the current vif is already running in mac80211 (resume/recovery) */
2459 if (iter_data.cur_running) {
2460 wlvif->hw_queue_base = vif->hw_queue[0];
2461 wl1271_debug(DEBUG_MAC80211,
2462 "using pre-allocated hw queue base %d",
2463 wlvif->hw_queue_base);
2465 /* the interface might have changed type */
2466 goto adjust_cab_queue;
2469 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2470 WLCORE_NUM_MAC_ADDRESSES);
2471 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2474 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2475 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2476 wlvif->hw_queue_base);
2478 for (i = 0; i < NUM_TX_QUEUES; i++) {
2479 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2480 /* register hw queues in mac80211 */
2481 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2485 /* the last places are reserved for cab queues per interface */
2486 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2487 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2488 wlvif->hw_queue_base / NUM_TX_QUEUES;
2490 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
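/*
 * Worked example (assuming the usual wlcore values NUM_TX_QUEUES == 4
 * and WLCORE_NUM_MAC_ADDRESSES == 3): data queues 0..11 are handed out
 * in blocks of four per vif, and the per-vif CAB (content-after-beacon)
 * queues are appended after them:
 *
 *   hw_queue_base 0 -> data queues 0..3,  cab_queue 12
 *   hw_queue_base 4 -> data queues 4..7,  cab_queue 13
 *   hw_queue_base 8 -> data queues 8..11, cab_queue 14
 *
 * i.e. cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
 *                  hw_queue_base / NUM_TX_QUEUES, matching the AP branch above.
 */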
2495 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2496 struct ieee80211_vif *vif)
2498 struct wl1271 *wl = hw->priv;
2499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2500 struct vif_counter_data vif_count;
2505 wl1271_error("Adding Interface not allowed while in PLT mode");
2509 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2510 IEEE80211_VIF_SUPPORTS_UAPSD |
2511 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2513 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2514 ieee80211_vif_type_p2p(vif), vif->addr);
2516 wl12xx_get_vif_count(hw, vif, &vif_count);
2518 mutex_lock(&wl->mutex);
2519 ret = wl1271_ps_elp_wakeup(wl);
2524 * In some rare corner-case HW recovery scenarios it's possible to
2525 * get here before __wl1271_op_remove_interface has completed, so
2526 * opt out if that is the case.
2528 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2529 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2535 ret = wl12xx_init_vif_data(wl, vif);
2540 role_type = wl12xx_get_role_type(wl, wlvif);
2541 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2546 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2550 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2551 wl12xx_force_active_psm(wl);
2552 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2553 mutex_unlock(&wl->mutex);
2554 wl1271_recovery_work(&wl->recovery_work);
2559 * TODO: once the nvs issue is solved, move this block
2560 * to start(), and make sure the driver is ON here.
2562 if (wl->state == WLCORE_STATE_OFF) {
2564 * we still need this in order to configure the fw
2565 * while uploading the nvs
2567 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2569 ret = wl12xx_init_fw(wl);
2574 if (!wlcore_is_p2p_mgmt(wlvif)) {
2575 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2576 role_type, &wlvif->role_id);
2580 ret = wl1271_init_vif_specific(wl, vif);
2585 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2586 &wlvif->dev_role_id);
2590 /* needed mainly for configuring rate policies */
2591 ret = wl1271_sta_hw_init(wl, wlvif);
2596 list_add(&wlvif->list, &wl->wlvif_list);
2597 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2599 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2604 wl1271_ps_elp_sleep(wl);
2606 mutex_unlock(&wl->mutex);
2611 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2612 struct ieee80211_vif *vif,
2613 bool reset_tx_queues)
2615 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2617 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2619 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2621 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2624 /* because of hardware recovery, we may get here twice */
2625 if (wl->state == WLCORE_STATE_OFF)
2628 wl1271_info("down");
2630 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2631 wl->scan_wlvif == wlvif) {
2632 struct cfg80211_scan_info info = {
2637 * Rearm the tx watchdog just before idling scan. This
2638 * prevents just-finished scans from triggering the watchdog
2640 wl12xx_rearm_tx_watchdog_locked(wl);
2642 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2643 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2644 wl->scan_wlvif = NULL;
2645 wl->scan.req = NULL;
2646 ieee80211_scan_completed(wl->hw, &info);
2649 if (wl->sched_vif == wlvif)
2650 wl->sched_vif = NULL;
2652 if (wl->roc_vif == vif) {
2654 ieee80211_remain_on_channel_expired(wl->hw);
2657 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2658 /* disable active roles */
2659 ret = wl1271_ps_elp_wakeup(wl);
2663 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2664 wlvif->bss_type == BSS_TYPE_IBSS) {
2665 if (wl12xx_dev_role_started(wlvif))
2666 wl12xx_stop_dev(wl, wlvif);
2669 if (!wlcore_is_p2p_mgmt(wlvif)) {
2670 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2674 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2679 wl1271_ps_elp_sleep(wl);
2682 wl12xx_tx_reset_wlvif(wl, wlvif);
2684 /* clear all hlids (except system_hlid) */
2685 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2687 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2688 wlvif->bss_type == BSS_TYPE_IBSS) {
2689 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2690 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2691 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2692 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2693 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2695 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2696 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2697 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2698 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2699 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2700 wl12xx_free_rate_policy(wl,
2701 &wlvif->ap.ucast_rate_idx[i]);
2702 wl1271_free_ap_keys(wl, wlvif);
2705 dev_kfree_skb(wlvif->probereq);
2706 wlvif->probereq = NULL;
2707 if (wl->last_wlvif == wlvif)
2708 wl->last_wlvif = NULL;
2709 list_del(&wlvif->list);
2710 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2711 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2712 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2720 * Last AP, but stations remain. Configure sleep auth according to STA.
2721 * Don't do this on unintended recovery.
2723 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2724 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2727 if (wl->ap_count == 0 && is_ap) {
2728 /* mask ap events */
2729 wl->event_mask &= ~wl->ap_event_mask;
2730 wl1271_event_unmask(wl);
2733 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2734 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2735 /* Configure for power according to debugfs */
2736 if (sta_auth != WL1271_PSM_ILLEGAL)
2737 wl1271_acx_sleep_auth(wl, sta_auth);
2738 /* Configure for ELP power saving */
2740 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2744 mutex_unlock(&wl->mutex);
2746 del_timer_sync(&wlvif->rx_streaming_timer);
2747 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2748 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2749 cancel_work_sync(&wlvif->rc_update_work);
2750 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2751 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2752 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2754 mutex_lock(&wl->mutex);
2757 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2758 struct ieee80211_vif *vif)
2760 struct wl1271 *wl = hw->priv;
2761 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2762 struct wl12xx_vif *iter;
2763 struct vif_counter_data vif_count;
2765 wl12xx_get_vif_count(hw, vif, &vif_count);
2766 mutex_lock(&wl->mutex);
2768 if (wl->state == WLCORE_STATE_OFF ||
2769 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2773 * wl->vif can be null here if someone shuts down the interface
2774 * just when hardware recovery has been started.
2776 wl12xx_for_each_wlvif(wl, iter) {
2780 __wl1271_op_remove_interface(wl, vif, true);
2783 WARN_ON(iter != wlvif);
2784 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2785 wl12xx_force_active_psm(wl);
2786 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2787 wl12xx_queue_recovery_work(wl);
2790 mutex_unlock(&wl->mutex);
2793 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2794 struct ieee80211_vif *vif,
2795 enum nl80211_iftype new_type, bool p2p)
2797 struct wl1271 *wl = hw->priv;
2800 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2801 wl1271_op_remove_interface(hw, vif);
2803 vif->type = new_type;
2805 ret = wl1271_op_add_interface(hw, vif);
2807 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2811 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2814 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2817 * One of the side effects of the JOIN command is that it clears
2818 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2819 * to a WPA/WPA2 access point will therefore kill the data-path.
2820 * Currently the only valid scenario for JOIN during association
2821 * is on roaming, in which case we will also be given new keys.
2822 * Keep the below message for now, unless it starts bothering
2823 * users who really like to roam a lot :)
2825 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2826 wl1271_info("JOIN while associated.");
2828 /* clear encryption type */
2829 wlvif->encryption_type = KEY_NONE;
2832 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2834 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2836 * TODO: this is an ugly workaround for a wl12xx fw
2837 * bug - we are not able to tx/rx after the first
2838 * start_sta, so make dummy start+stop calls,
2839 * and then call start_sta again.
2840 * This should be fixed in the fw.
2842 wl12xx_cmd_role_start_sta(wl, wlvif);
2843 wl12xx_cmd_role_stop_sta(wl, wlvif);
2846 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2852 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2856 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2860 wl1271_error("No SSID in IEs!");
2865 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2866 wl1271_error("SSID is too long!");
2870 wlvif->ssid_len = ssid_len;
2871 memcpy(wlvif->ssid, ptr+2, ssid_len);
2875 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2877 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2878 struct sk_buff *skb;
2881 /* we currently only support setting the ssid from the ap probe req */
2882 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2885 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2889 ieoffset = offsetof(struct ieee80211_mgmt,
2890 u.probe_req.variable);
2891 wl1271_ssid_set(wlvif, skb, ieoffset);
2897 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2898 struct ieee80211_bss_conf *bss_conf,
2904 wlvif->aid = bss_conf->aid;
2905 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2906 wlvif->beacon_int = bss_conf->beacon_int;
2907 wlvif->wmm_enabled = bss_conf->qos;
2909 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2912 * with wl1271, we don't need to update the
2913 * beacon_int and dtim_period, because the firmware
2914 * updates them by itself when the first beacon is
2915 * received after a join.
2917 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2922 * Get a template for hardware connection maintenance
2924 dev_kfree_skb(wlvif->probereq);
2925 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2928 ieoffset = offsetof(struct ieee80211_mgmt,
2929 u.probe_req.variable);
2930 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2932 /* enable the connection monitoring feature */
2933 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2938 * The join command disables the keep-alive mode, shuts down its process,
2939 * and also clears the template config, so we need to reset it all after
2940 * the join. The acx_aid starts the keep-alive process, and the order
2941 * of the commands below is relevant.
2943 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2947 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2951 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2955 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2956 wlvif->sta.klv_template_id,
2957 ACX_KEEP_ALIVE_TPL_VALID);
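/*
 * For reference, the keep-alive bring-up sequence used above is:
 *   1. wl1271_acx_keep_alive_mode(wl, wlvif, true)
 *   2. wl1271_acx_aid()                  - starts the keep-alive process
 *   3. wl12xx_cmd_build_klv_null_data()  - (re)build the null-data template
 *   4. wl1271_acx_keep_alive_config(..., ACX_KEEP_ALIVE_TPL_VALID)
 * Reordering these steps is not safe, per the comment above.
 */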
2962 * The default fw psm configuration is AUTO, while mac80211 default
2963 * setting is off (ACTIVE), so sync the fw with the correct value.
2965 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2971 wl1271_tx_enabled_rates_get(wl,
2974 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2982 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2985 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2987 /* make sure we are connected and joined (sta) */
2989 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2992 /* make sure we are joined (ibss) */
2994 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2998 /* use defaults when not associated */
3001 /* free probe-request template */
3002 dev_kfree_skb(wlvif->probereq);
3003 wlvif->probereq = NULL;
3005 /* disable connection monitor features */
3006 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3010 /* Disable the keep-alive feature */
3011 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3015 /* disable beacon filtering */
3016 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3021 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3022 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3024 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3025 ieee80211_chswitch_done(vif, false);
3026 cancel_delayed_work(&wlvif->channel_switch_work);
3029 /* invalidate keep-alive template */
3030 wl1271_acx_keep_alive_config(wl, wlvif,
3031 wlvif->sta.klv_template_id,
3032 ACX_KEEP_ALIVE_TPL_INVALID);
3037 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3039 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3040 wlvif->rate_set = wlvif->basic_rate_set;
3043 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3046 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3048 if (idle == cur_idle)
3052 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3054 /* The current firmware only supports sched_scan in idle */
3055 if (wl->sched_vif == wlvif)
3056 wl->ops->sched_scan_stop(wl, wlvif);
3058 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3062 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3063 struct ieee80211_conf *conf, u32 changed)
3067 if (wlcore_is_p2p_mgmt(wlvif))
3070 if (conf->power_level != wlvif->power_level) {
3071 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3075 wlvif->power_level = conf->power_level;
3081 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3083 struct wl1271 *wl = hw->priv;
3084 struct wl12xx_vif *wlvif;
3085 struct ieee80211_conf *conf = &hw->conf;
3088 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3090 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3092 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3095 mutex_lock(&wl->mutex);
3097 if (changed & IEEE80211_CONF_CHANGE_POWER)
3098 wl->power_level = conf->power_level;
3100 if (unlikely(wl->state != WLCORE_STATE_ON))
3103 ret = wl1271_ps_elp_wakeup(wl);
3107 /* configure each interface */
3108 wl12xx_for_each_wlvif(wl, wlvif) {
3109 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3115 wl1271_ps_elp_sleep(wl);
3118 mutex_unlock(&wl->mutex);
3123 struct wl1271_filter_params {
3126 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3129 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3130 struct netdev_hw_addr_list *mc_list)
3132 struct wl1271_filter_params *fp;
3133 struct netdev_hw_addr *ha;
3135 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3137 wl1271_error("Out of memory setting filters.");
3141 /* update multicast filtering parameters */
3142 fp->mc_list_length = 0;
3143 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3144 fp->enabled = false;
3147 netdev_hw_addr_list_for_each(ha, mc_list) {
3148 memcpy(fp->mc_list[fp->mc_list_length],
3149 ha->addr, ETH_ALEN);
3150 fp->mc_list_length++;
3154 return (u64)(unsigned long)fp;
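/*
 * Note: mac80211 passes the u64 returned here back into
 * wl1271_op_configure_filter() as its "multicast" argument, so the
 * pointer simply round-trips through the cookie:
 *
 *   return (u64)(unsigned long)fp;           (here)
 *   fp = (void *)(unsigned long)multicast;   (in configure_filter below)
 *
 * The filter params are then consumed and freed at the end of
 * wl1271_op_configure_filter().
 */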
3157 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3159 FIF_BCN_PRBRESP_PROMISC | \
3163 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3164 unsigned int changed,
3165 unsigned int *total, u64 multicast)
3167 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3168 struct wl1271 *wl = hw->priv;
3169 struct wl12xx_vif *wlvif;
3173 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3174 " total %x", changed, *total);
3176 mutex_lock(&wl->mutex);
3178 *total &= WL1271_SUPPORTED_FILTERS;
3179 changed &= WL1271_SUPPORTED_FILTERS;
3181 if (unlikely(wl->state != WLCORE_STATE_ON))
3184 ret = wl1271_ps_elp_wakeup(wl);
3188 wl12xx_for_each_wlvif(wl, wlvif) {
3189 if (wlcore_is_p2p_mgmt(wlvif))
3192 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3193 if (*total & FIF_ALLMULTI)
3194 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3198 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3201 fp->mc_list_length);
3208 * The fw doesn't provide an API to configure the filters. Instead,
3209 * the filter configuration is based on the active roles / ROC state.
3214 wl1271_ps_elp_sleep(wl);
3217 mutex_unlock(&wl->mutex);
3221 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3222 u8 id, u8 key_type, u8 key_size,
3223 const u8 *key, u8 hlid, u32 tx_seq_32,
3226 struct wl1271_ap_key *ap_key;
3229 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3231 if (key_size > MAX_KEY_SIZE)
3235 * Find the next free entry in ap_keys. Also check we are not replacing an existing key.
3238 for (i = 0; i < MAX_NUM_KEYS; i++) {
3239 if (wlvif->ap.recorded_keys[i] == NULL)
3242 if (wlvif->ap.recorded_keys[i]->id == id) {
3243 wl1271_warning("trying to record key replacement");
3248 if (i == MAX_NUM_KEYS)
3251 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3256 ap_key->key_type = key_type;
3257 ap_key->key_size = key_size;
3258 memcpy(ap_key->key, key, key_size);
3259 ap_key->hlid = hlid;
3260 ap_key->tx_seq_32 = tx_seq_32;
3261 ap_key->tx_seq_16 = tx_seq_16;
3263 wlvif->ap.recorded_keys[i] = ap_key;
3267 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3271 for (i = 0; i < MAX_NUM_KEYS; i++) {
3272 kfree(wlvif->ap.recorded_keys[i]);
3273 wlvif->ap.recorded_keys[i] = NULL;
3277 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3280 struct wl1271_ap_key *key;
3281 bool wep_key_added = false;
3283 for (i = 0; i < MAX_NUM_KEYS; i++) {
3285 if (wlvif->ap.recorded_keys[i] == NULL)
3288 key = wlvif->ap.recorded_keys[i];
3290 if (hlid == WL12XX_INVALID_LINK_ID)
3291 hlid = wlvif->ap.bcast_hlid;
3293 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3294 key->id, key->key_type,
3295 key->key_size, key->key,
3296 hlid, key->tx_seq_32,
3301 if (key->key_type == KEY_WEP)
3302 wep_key_added = true;
3305 if (wep_key_added) {
3306 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3307 wlvif->ap.bcast_hlid);
3313 wl1271_free_ap_keys(wl, wlvif);
3317 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3318 u16 action, u8 id, u8 key_type,
3319 u8 key_size, const u8 *key, u32 tx_seq_32,
3320 u16 tx_seq_16, struct ieee80211_sta *sta)
3323 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3326 struct wl1271_station *wl_sta;
3330 wl_sta = (struct wl1271_station *)sta->drv_priv;
3331 hlid = wl_sta->hlid;
3333 hlid = wlvif->ap.bcast_hlid;
3336 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3338 * We do not support removing keys after AP shutdown.
3339 * Pretend we do to make mac80211 happy.
3341 if (action != KEY_ADD_OR_REPLACE)
3344 ret = wl1271_record_ap_key(wl, wlvif, id,
3346 key, hlid, tx_seq_32,
3349 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3350 id, key_type, key_size,
3351 key, hlid, tx_seq_32,
3359 static const u8 bcast_addr[ETH_ALEN] = {
3360 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3363 addr = sta ? sta->addr : bcast_addr;
3365 if (is_zero_ether_addr(addr)) {
3366 /* We don't support TX-only encryption */
3370 /* The wl1271 does not allow removing unicast keys - they
3371 will be cleared automatically on the next CMD_JOIN. Ignore the
3372 request silently, as we don't want mac80211 to emit
3373 an error message. */
3374 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3377 /* don't remove key if hlid was already deleted */
3378 if (action == KEY_REMOVE &&
3379 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3382 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3383 id, key_type, key_size,
3384 key, addr, tx_seq_32,
3394 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3395 struct ieee80211_vif *vif,
3396 struct ieee80211_sta *sta,
3397 struct ieee80211_key_conf *key_conf)
3399 struct wl1271 *wl = hw->priv;
3401 bool might_change_spare =
3402 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3403 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3405 if (might_change_spare) {
3407 * stop the queues and flush to ensure the next packets are
3408 * in sync with FW spare block accounting
3410 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3411 wl1271_tx_flush(wl);
3414 mutex_lock(&wl->mutex);
3416 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3418 goto out_wake_queues;
3421 ret = wl1271_ps_elp_wakeup(wl);
3423 goto out_wake_queues;
3425 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3427 wl1271_ps_elp_sleep(wl);
3430 if (might_change_spare)
3431 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3433 mutex_unlock(&wl->mutex);
3438 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3439 struct ieee80211_vif *vif,
3440 struct ieee80211_sta *sta,
3441 struct ieee80211_key_conf *key_conf)
3443 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3450 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3452 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3453 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3454 key_conf->cipher, key_conf->keyidx,
3455 key_conf->keylen, key_conf->flags);
3456 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3458 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3460 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3461 hlid = wl_sta->hlid;
3463 hlid = wlvif->ap.bcast_hlid;
3466 hlid = wlvif->sta.hlid;
3468 if (hlid != WL12XX_INVALID_LINK_ID) {
3469 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3470 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3471 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
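/*
 * Illustrative example, assuming the usual definitions
 * WL1271_TX_SECURITY_HI32(s) == (u32)((s) >> 16) and
 * WL1271_TX_SECURITY_LO16(s) == (u16)((s) & 0xffff):
 * a freed-packets count of 0x12345678 yields tx_seq_32 = 0x1234 and
 * tx_seq_16 = 0x5678, i.e. the 48-bit security sequence counter is
 * split into a high and a low part for the fw.
 */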
3474 switch (key_conf->cipher) {
3475 case WLAN_CIPHER_SUITE_WEP40:
3476 case WLAN_CIPHER_SUITE_WEP104:
3479 key_conf->hw_key_idx = key_conf->keyidx;
3481 case WLAN_CIPHER_SUITE_TKIP:
3482 key_type = KEY_TKIP;
3483 key_conf->hw_key_idx = key_conf->keyidx;
3485 case WLAN_CIPHER_SUITE_CCMP:
3487 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3489 case WL1271_CIPHER_SUITE_GEM:
3493 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3500 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3501 key_conf->keyidx, key_type,
3502 key_conf->keylen, key_conf->key,
3503 tx_seq_32, tx_seq_16, sta);
3505 wl1271_error("Could not add or replace key");
3510 * Reconfigure the ARP response if the unicast (or common)
3511 * encryption key type was changed.
3513 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3514 (sta || key_type == KEY_WEP) &&
3515 wlvif->encryption_type != key_type) {
3516 wlvif->encryption_type = key_type;
3517 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3519 wl1271_warning("build arp rsp failed: %d", ret);
3526 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3527 key_conf->keyidx, key_type,
3528 key_conf->keylen, key_conf->key,
3531 wl1271_error("Could not remove key");
3537 wl1271_error("Unsupported key cmd 0x%x", cmd);
3543 EXPORT_SYMBOL_GPL(wlcore_set_key);
3545 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3546 struct ieee80211_vif *vif,
3549 struct wl1271 *wl = hw->priv;
3550 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3553 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3556 /* we don't handle unsetting of default key */
3560 mutex_lock(&wl->mutex);
3562 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3567 ret = wl1271_ps_elp_wakeup(wl);
3571 wlvif->default_key = key_idx;
3573 /* the default WEP key needs to be configured at least once */
3574 if (wlvif->encryption_type == KEY_WEP) {
3575 ret = wl12xx_cmd_set_default_wep_key(wl,
3583 wl1271_ps_elp_sleep(wl);
3586 mutex_unlock(&wl->mutex);
3589 void wlcore_regdomain_config(struct wl1271 *wl)
3593 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3596 mutex_lock(&wl->mutex);
3598 if (unlikely(wl->state != WLCORE_STATE_ON))
3601 ret = wl1271_ps_elp_wakeup(wl);
3605 ret = wlcore_cmd_regdomain_config_locked(wl);
3607 wl12xx_queue_recovery_work(wl);
3611 wl1271_ps_elp_sleep(wl);
3613 mutex_unlock(&wl->mutex);
3616 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3617 struct ieee80211_vif *vif,
3618 struct ieee80211_scan_request *hw_req)
3620 struct cfg80211_scan_request *req = &hw_req->req;
3621 struct wl1271 *wl = hw->priv;
3626 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3629 ssid = req->ssids[0].ssid;
3630 len = req->ssids[0].ssid_len;
3633 mutex_lock(&wl->mutex);
3635 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3637 * We cannot return -EBUSY here because cfg80211 will expect
3638 * a call to ieee80211_scan_completed if we do - in this case
3639 * there won't be any call.
3645 ret = wl1271_ps_elp_wakeup(wl);
3649 /* fail if there is any role in ROC */
3650 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3651 /* don't allow scanning right now */
3656 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3658 wl1271_ps_elp_sleep(wl);
3660 mutex_unlock(&wl->mutex);
3665 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3666 struct ieee80211_vif *vif)
3668 struct wl1271 *wl = hw->priv;
3669 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3670 struct cfg80211_scan_info info = {
3675 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3677 mutex_lock(&wl->mutex);
3679 if (unlikely(wl->state != WLCORE_STATE_ON))
3682 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3685 ret = wl1271_ps_elp_wakeup(wl);
3689 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3690 ret = wl->ops->scan_stop(wl, wlvif);
3696 * Rearm the tx watchdog just before idling scan. This
3697 * prevents just-finished scans from triggering the watchdog
3699 wl12xx_rearm_tx_watchdog_locked(wl);
3701 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3702 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3703 wl->scan_wlvif = NULL;
3704 wl->scan.req = NULL;
3705 ieee80211_scan_completed(wl->hw, &info);
3708 wl1271_ps_elp_sleep(wl);
3710 mutex_unlock(&wl->mutex);
3712 cancel_delayed_work_sync(&wl->scan_complete_work);
3715 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3716 struct ieee80211_vif *vif,
3717 struct cfg80211_sched_scan_request *req,
3718 struct ieee80211_scan_ies *ies)
3720 struct wl1271 *wl = hw->priv;
3721 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3724 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3726 mutex_lock(&wl->mutex);
3728 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3733 ret = wl1271_ps_elp_wakeup(wl);
3737 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3741 wl->sched_vif = wlvif;
3744 wl1271_ps_elp_sleep(wl);
3746 mutex_unlock(&wl->mutex);
3750 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3751 struct ieee80211_vif *vif)
3753 struct wl1271 *wl = hw->priv;
3754 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3757 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3759 mutex_lock(&wl->mutex);
3761 if (unlikely(wl->state != WLCORE_STATE_ON))
3764 ret = wl1271_ps_elp_wakeup(wl);
3768 wl->ops->sched_scan_stop(wl, wlvif);
3770 wl1271_ps_elp_sleep(wl);
3772 mutex_unlock(&wl->mutex);
3777 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3779 struct wl1271 *wl = hw->priv;
3782 mutex_lock(&wl->mutex);
3784 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3789 ret = wl1271_ps_elp_wakeup(wl);
3793 ret = wl1271_acx_frag_threshold(wl, value);
3795 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3797 wl1271_ps_elp_sleep(wl);
3800 mutex_unlock(&wl->mutex);
3805 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3807 struct wl1271 *wl = hw->priv;
3808 struct wl12xx_vif *wlvif;
3811 mutex_lock(&wl->mutex);
3813 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3818 ret = wl1271_ps_elp_wakeup(wl);
3822 wl12xx_for_each_wlvif(wl, wlvif) {
3823 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3825 wl1271_warning("set rts threshold failed: %d", ret);
3827 wl1271_ps_elp_sleep(wl);
3830 mutex_unlock(&wl->mutex);
3835 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3838 const u8 *next, *end = skb->data + skb->len;
3839 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3840 skb->len - ieoffset);
3845 memmove(ie, next, end - next);
3846 skb_trim(skb, skb->len - len);
3849 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3850 unsigned int oui, u8 oui_type,
3854 const u8 *next, *end = skb->data + skb->len;
3855 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3856 skb->data + ieoffset,
3857 skb->len - ieoffset);
3862 memmove(ie, next, end - next);
3863 skb_trim(skb, skb->len - len);
3866 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3867 struct ieee80211_vif *vif)
3869 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3870 struct sk_buff *skb;
3873 skb = ieee80211_proberesp_get(wl->hw, vif);
3877 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3878 CMD_TEMPL_AP_PROBE_RESPONSE,
3887 wl1271_debug(DEBUG_AP, "probe response updated");
3888 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3894 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3895 struct ieee80211_vif *vif,
3897 size_t probe_rsp_len,
3900 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3901 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3902 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3903 int ssid_ie_offset, ie_offset, templ_len;
3906 /* no need to change probe response if the SSID is set correctly */
3907 if (wlvif->ssid_len > 0)
3908 return wl1271_cmd_template_set(wl, wlvif->role_id,
3909 CMD_TEMPL_AP_PROBE_RESPONSE,
3914 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3915 wl1271_error("probe_rsp template too big");
3919 /* start searching from IE offset */
3920 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3922 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3923 probe_rsp_len - ie_offset);
3925 wl1271_error("No SSID in beacon!");
3929 ssid_ie_offset = ptr - probe_rsp_data;
3930 ptr += (ptr[1] + 2);
3932 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3934 /* insert SSID from bss_conf */
3935 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3936 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3937 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3938 bss_conf->ssid, bss_conf->ssid_len);
3939 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3941 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3942 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3943 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
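/*
 * Layout of the rebuilt template (sketch):
 *
 *   [ hdr + IEs up to the SSID IE ]  copied from probe_rsp_data
 *   [ EID | len | SSID bytes      ]  rebuilt from bss_conf->ssid
 *   [ IEs following the SSID IE   ]  copied from after the old SSID IE
 *
 * hence templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len
 *                 + (probe_rsp_len - (ptr - probe_rsp_data)).
 */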
3945 return wl1271_cmd_template_set(wl, wlvif->role_id,
3946 CMD_TEMPL_AP_PROBE_RESPONSE,
3952 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3953 struct ieee80211_vif *vif,
3954 struct ieee80211_bss_conf *bss_conf,
3957 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3960 if (changed & BSS_CHANGED_ERP_SLOT) {
3961 if (bss_conf->use_short_slot)
3962 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3964 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3966 wl1271_warning("Set slot time failed %d", ret);
3971 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3972 if (bss_conf->use_short_preamble)
3973 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3975 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3978 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3979 if (bss_conf->use_cts_prot)
3980 ret = wl1271_acx_cts_protect(wl, wlvif,
3983 ret = wl1271_acx_cts_protect(wl, wlvif,
3984 CTSPROTECT_DISABLE);
3986 wl1271_warning("Set ctsprotect failed %d", ret);
3995 static int wlcore_set_beacon_template(struct wl1271 *wl,
3996 struct ieee80211_vif *vif,
3999 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4000 struct ieee80211_hdr *hdr;
4003 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4004 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4012 wl1271_debug(DEBUG_MASTER, "beacon updated");
4014 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4016 dev_kfree_skb(beacon);
4019 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4020 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4022 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4027 dev_kfree_skb(beacon);
4031 wlvif->wmm_enabled =
4032 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4033 WLAN_OUI_TYPE_MICROSOFT_WMM,
4034 beacon->data + ieoffset,
4035 beacon->len - ieoffset);
4038 * In case we already have a probe-resp beacon set explicitly
4039 * by usermode, don't use the beacon data.
4041 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4044 /* remove TIM ie from probe response */
4045 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4048 * remove p2p ie from probe response.
4049 * the fw responds to probe requests that don't include
4050 * the p2p ie. probe requests with a p2p ie will be passed up,
4051 * and will be answered by the supplicant (the spec
4052 * forbids including the p2p ie when responding to probe
4053 * requests that didn't include it).
4055 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4056 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4058 hdr = (struct ieee80211_hdr *) beacon->data;
4059 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4060 IEEE80211_STYPE_PROBE_RESP);
4062 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4067 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4068 CMD_TEMPL_PROBE_RESPONSE,
4073 dev_kfree_skb(beacon);
4081 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4082 struct ieee80211_vif *vif,
4083 struct ieee80211_bss_conf *bss_conf,
4086 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4087 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4090 if (changed & BSS_CHANGED_BEACON_INT) {
4091 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4092 bss_conf->beacon_int);
4094 wlvif->beacon_int = bss_conf->beacon_int;
4097 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4098 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4100 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4103 if (changed & BSS_CHANGED_BEACON) {
4104 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4108 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4110 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4117 wl1271_error("beacon info change failed: %d", ret);
4121 /* AP mode changes */
4122 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4123 struct ieee80211_vif *vif,
4124 struct ieee80211_bss_conf *bss_conf,
4127 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4130 if (changed & BSS_CHANGED_BASIC_RATES) {
4131 u32 rates = bss_conf->basic_rates;
4133 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4135 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4136 wlvif->basic_rate_set);
4138 ret = wl1271_init_ap_rates(wl, wlvif);
4140 wl1271_error("AP rate policy change failed %d", ret);
4144 ret = wl1271_ap_init_templates(wl, vif);
4148 /* No need to set probe resp template for mesh */
4149 if (!ieee80211_vif_is_mesh(vif)) {
4150 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4157 ret = wlcore_set_beacon_template(wl, vif, true);
4162 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4166 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4167 if (bss_conf->enable_beacon) {
4168 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4169 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4173 ret = wl1271_ap_init_hwenc(wl, wlvif);
4177 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4178 wl1271_debug(DEBUG_AP, "started AP");
4181 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4183 * AP might be in ROC in case we have just
4184 * sent auth reply. handle it.
4186 if (test_bit(wlvif->role_id, wl->roc_map))
4187 wl12xx_croc(wl, wlvif->role_id);
4189 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4193 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4194 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4196 wl1271_debug(DEBUG_AP, "stopped AP");
4201 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4205 /* Handle HT information change */
4206 if ((changed & BSS_CHANGED_HT) &&
4207 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4208 ret = wl1271_acx_set_ht_information(wl, wlvif,
4209 bss_conf->ht_operation_mode);
4211 wl1271_warning("Set ht information failed %d", ret);
4220 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4221 struct ieee80211_bss_conf *bss_conf,
4227 wl1271_debug(DEBUG_MAC80211,
4228 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4229 bss_conf->bssid, bss_conf->aid,
4230 bss_conf->beacon_int,
4231 bss_conf->basic_rates, sta_rate_set);
4233 wlvif->beacon_int = bss_conf->beacon_int;
4234 rates = bss_conf->basic_rates;
4235 wlvif->basic_rate_set =
4236 wl1271_tx_enabled_rates_get(wl, rates,
4239 wl1271_tx_min_rate_get(wl,
4240 wlvif->basic_rate_set);
4244 wl1271_tx_enabled_rates_get(wl,
4248 /* we only support sched_scan while not connected */
4249 if (wl->sched_vif == wlvif)
4250 wl->ops->sched_scan_stop(wl, wlvif);
4252 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4256 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4260 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4264 wlcore_set_ssid(wl, wlvif);
4266 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4271 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4275 /* revert to the minimum rates for the current band */
4276 wl1271_set_band_rate(wl, wlvif);
4277 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4279 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4283 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4284 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4285 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4290 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4293 /* STA/IBSS mode changes */
4294 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4295 struct ieee80211_vif *vif,
4296 struct ieee80211_bss_conf *bss_conf,
4299 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4300 bool do_join = false;
4301 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4302 bool ibss_joined = false;
4303 u32 sta_rate_set = 0;
4305 struct ieee80211_sta *sta;
4306 bool sta_exists = false;
4307 struct ieee80211_sta_ht_cap sta_ht_cap;
4310 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4316 if (changed & BSS_CHANGED_IBSS) {
4317 if (bss_conf->ibss_joined) {
4318 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4321 wlcore_unset_assoc(wl, wlvif);
4322 wl12xx_cmd_role_stop_sta(wl, wlvif);
4326 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4329 /* Need to update the SSID (for filtering etc) */
4330 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4333 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4334 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4335 bss_conf->enable_beacon ? "enabled" : "disabled");
4340 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4341 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4343 if (changed & BSS_CHANGED_CQM) {
4344 bool enable = false;
4345 if (bss_conf->cqm_rssi_thold)
4347 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4348 bss_conf->cqm_rssi_thold,
4349 bss_conf->cqm_rssi_hyst);
4352 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4355 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4356 BSS_CHANGED_ASSOC)) {
4358 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4360 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4362 /* save the supp_rates of the ap */
4363 sta_rate_set = sta->supp_rates[wlvif->band];
4364 if (sta->ht_cap.ht_supported)
4366 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4367 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
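/*
 * Note: sta_rate_set packs the AP's legacy supported rates in its low
 * bits and, when HT is supported, the MCS rx masks shifted up by the
 * chip-specific HW_HT_RATES_OFFSET / HW_MIMO_RATES_OFFSET, so a single
 * bitmap can describe both legacy and HT rates for the rate policies.
 */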
4368 sta_ht_cap = sta->ht_cap;
4375 if (changed & BSS_CHANGED_BSSID) {
4376 if (!is_zero_ether_addr(bss_conf->bssid)) {
4377 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4382 /* Need to update the BSSID (for filtering etc) */
4385 ret = wlcore_clear_bssid(wl, wlvif);
4391 if (changed & BSS_CHANGED_IBSS) {
4392 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4393 bss_conf->ibss_joined);
4395 if (bss_conf->ibss_joined) {
4396 u32 rates = bss_conf->basic_rates;
4397 wlvif->basic_rate_set =
4398 wl1271_tx_enabled_rates_get(wl, rates,
4401 wl1271_tx_min_rate_get(wl,
4402 wlvif->basic_rate_set);
4404 /* by default, use 11b + OFDM rates */
4405 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4406 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4412 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4413 /* enable beacon filtering */
4414 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4419 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4424 ret = wlcore_join(wl, wlvif);
4426 wl1271_warning("cmd join failed %d", ret);
4431 if (changed & BSS_CHANGED_ASSOC) {
4432 if (bss_conf->assoc) {
4433 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4438 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4439 wl12xx_set_authorized(wl, wlvif);
4441 wlcore_unset_assoc(wl, wlvif);
4445 if (changed & BSS_CHANGED_PS) {
4446 if ((bss_conf->ps) &&
4447 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4448 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4452 if (wl->conf.conn.forced_ps) {
4453 ps_mode = STATION_POWER_SAVE_MODE;
4454 ps_mode_str = "forced";
4456 ps_mode = STATION_AUTO_PS_MODE;
4457 ps_mode_str = "auto";
4460 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4462 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4464 wl1271_warning("enter %s ps failed %d",
4466 } else if (!bss_conf->ps &&
4467 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4468 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4470 ret = wl1271_ps_set_mode(wl, wlvif,
4471 STATION_ACTIVE_MODE);
4473 wl1271_warning("exit auto ps failed %d", ret);
4477 /* Handle new association with HT. Do this after join. */
4480 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4482 ret = wlcore_hw_set_peer_cap(wl,
4488 wl1271_warning("Set ht cap failed %d", ret);
4494 ret = wl1271_acx_set_ht_information(wl, wlvif,
4495 bss_conf->ht_operation_mode);
4497 wl1271_warning("Set ht information failed %d",
4504 /* Handle arp filtering. Done after join. */
4505 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4506 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4507 __be32 addr = bss_conf->arp_addr_list[0];
4508 wlvif->sta.qos = bss_conf->qos;
4509 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4511 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4512 wlvif->ip_addr = addr;
4514 * The template should have been configured only upon
4515 * association. However, it seems that the correct IP
4516 * isn't being set (when sending), so we have to
4517 * reconfigure the template upon every IP change.
4519 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4521 wl1271_warning("build arp rsp failed: %d", ret);
4525 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4526 (ACX_ARP_FILTER_ARP_FILTERING |
4527 ACX_ARP_FILTER_AUTO_ARP),
4531 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4542 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4543 struct ieee80211_vif *vif,
4544 struct ieee80211_bss_conf *bss_conf,
4547 struct wl1271 *wl = hw->priv;
4548 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4549 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4552 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4553 wlvif->role_id, (int)changed);
4556 * make sure to cancel pending disconnections if our association state changed
4559 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4560 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4562 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4563 !bss_conf->enable_beacon)
4564 wl1271_tx_flush(wl);
4566 mutex_lock(&wl->mutex);
4568 if (unlikely(wl->state != WLCORE_STATE_ON))
4571 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4574 ret = wl1271_ps_elp_wakeup(wl);
4578 if ((changed & BSS_CHANGED_TXPOWER) &&
4579 bss_conf->txpower != wlvif->power_level) {
4581 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4585 wlvif->power_level = bss_conf->txpower;
4589 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4591 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4593 wl1271_ps_elp_sleep(wl);
4596 mutex_unlock(&wl->mutex);
4599 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4600 struct ieee80211_chanctx_conf *ctx)
4602 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4603 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4604 cfg80211_get_chandef_type(&ctx->def));
4608 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4609 struct ieee80211_chanctx_conf *ctx)
4611 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4612 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4613 cfg80211_get_chandef_type(&ctx->def));
4616 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4617 struct ieee80211_chanctx_conf *ctx,
4620 struct wl1271 *wl = hw->priv;
4621 struct wl12xx_vif *wlvif;
4623 int channel = ieee80211_frequency_to_channel(
4624 ctx->def.chan->center_freq);
4626 wl1271_debug(DEBUG_MAC80211,
4627 "mac80211 change chanctx %d (type %d) changed 0x%x",
4628 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4630 mutex_lock(&wl->mutex);
4632 ret = wl1271_ps_elp_wakeup(wl);
4636 wl12xx_for_each_wlvif(wl, wlvif) {
4637 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4640 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4646 /* start radar if needed */
4647 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4648 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4649 ctx->radar_enabled && !wlvif->radar_enabled &&
4650 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4651 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4652 wlcore_hw_set_cac(wl, wlvif, true);
4653 wlvif->radar_enabled = true;
4657 wl1271_ps_elp_sleep(wl);
4659 mutex_unlock(&wl->mutex);
4662 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4663 struct ieee80211_vif *vif,
4664 struct ieee80211_chanctx_conf *ctx)
4666 struct wl1271 *wl = hw->priv;
4667 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4668 int channel = ieee80211_frequency_to_channel(
4669 ctx->def.chan->center_freq);
4672 wl1271_debug(DEBUG_MAC80211,
4673 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4674 wlvif->role_id, channel,
4675 cfg80211_get_chandef_type(&ctx->def),
4676 ctx->radar_enabled, ctx->def.chan->dfs_state);
4678 mutex_lock(&wl->mutex);
4680 if (unlikely(wl->state != WLCORE_STATE_ON))
4683 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4686 ret = wl1271_ps_elp_wakeup(wl);
4690 wlvif->band = ctx->def.chan->band;
4691 wlvif->channel = channel;
4692 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4694 /* update default rates according to the band */
4695 wl1271_set_band_rate(wl, wlvif);
4697 if (ctx->radar_enabled &&
4698 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4699 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4700 wlcore_hw_set_cac(wl, wlvif, true);
4701 wlvif->radar_enabled = true;
4704 wl1271_ps_elp_sleep(wl);
4706 mutex_unlock(&wl->mutex);
4711 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4712 struct ieee80211_vif *vif,
4713 struct ieee80211_chanctx_conf *ctx)
4715 struct wl1271 *wl = hw->priv;
4716 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4719 wl1271_debug(DEBUG_MAC80211,
4720 "mac80211 unassign chanctx (role %d) %d (type %d)",
4722 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4723 cfg80211_get_chandef_type(&ctx->def));
4725 wl1271_tx_flush(wl);
4727 mutex_lock(&wl->mutex);
4729 if (unlikely(wl->state != WLCORE_STATE_ON))
4732 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4735 ret = wl1271_ps_elp_wakeup(wl);
4739 if (wlvif->radar_enabled) {
4740 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4741 wlcore_hw_set_cac(wl, wlvif, false);
4742 wlvif->radar_enabled = false;
4745 wl1271_ps_elp_sleep(wl);
4747 mutex_unlock(&wl->mutex);
4750 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4751 struct wl12xx_vif *wlvif,
4752 struct ieee80211_chanctx_conf *new_ctx)
4754 int channel = ieee80211_frequency_to_channel(
4755 new_ctx->def.chan->center_freq);
4757 wl1271_debug(DEBUG_MAC80211,
4758 "switch vif (role %d) %d -> %d chan_type: %d",
4759 wlvif->role_id, wlvif->channel, channel,
4760 cfg80211_get_chandef_type(&new_ctx->def));
4762 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4765 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4767 if (wlvif->radar_enabled) {
4768 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4769 wlcore_hw_set_cac(wl, wlvif, false);
4770 wlvif->radar_enabled = false;
4773 wlvif->band = new_ctx->def.chan->band;
4774 wlvif->channel = channel;
4775 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4777 /* start radar if needed */
4778 if (new_ctx->radar_enabled) {
4779 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4780 wlcore_hw_set_cac(wl, wlvif, true);
4781 wlvif->radar_enabled = true;
4788 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4789 struct ieee80211_vif_chanctx_switch *vifs,
4791 enum ieee80211_chanctx_switch_mode mode)
4793 struct wl1271 *wl = hw->priv;
4796 wl1271_debug(DEBUG_MAC80211,
4797 "mac80211 switch chanctx n_vifs %d mode %d",
4800 mutex_lock(&wl->mutex);
4802 ret = wl1271_ps_elp_wakeup(wl);
4806 for (i = 0; i < n_vifs; i++) {
4807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4809 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4814 wl1271_ps_elp_sleep(wl);
4816 mutex_unlock(&wl->mutex);
4821 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4822 struct ieee80211_vif *vif, u16 queue,
4823 const struct ieee80211_tx_queue_params *params)
4825 struct wl1271 *wl = hw->priv;
4826 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4830 if (wlcore_is_p2p_mgmt(wlvif))
4833 mutex_lock(&wl->mutex);
4835 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4838 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4840 ps_scheme = CONF_PS_SCHEME_LEGACY;
4842 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4845 ret = wl1271_ps_elp_wakeup(wl);
4850 * the txop is configured in units of 32us by mac80211; the fw expects microseconds, hence the << 5 below.
4853 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4854 params->cw_min, params->cw_max,
4855 params->aifs, params->txop << 5);
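/*
 * Worked example: mac80211 reports txop in units of 32us, so the << 5
 * above converts it to microseconds for the fw, e.g. the typical WMM
 * AC_VO txop limit of 47 units becomes 47 << 5 = 1504 us (1.504 ms).
 */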
4859 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4860 CONF_CHANNEL_TYPE_EDCF,
4861 wl1271_tx_get_queue(queue),
4862 ps_scheme, CONF_ACK_POLICY_LEGACY,
4866 wl1271_ps_elp_sleep(wl);
4869 mutex_unlock(&wl->mutex);
4874 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4875 struct ieee80211_vif *vif)
4878 struct wl1271 *wl = hw->priv;
4879 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4880 u64 mactime = ULLONG_MAX;
4883 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4885 mutex_lock(&wl->mutex);
4887 if (unlikely(wl->state != WLCORE_STATE_ON))
4890 ret = wl1271_ps_elp_wakeup(wl);
4894 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4899 wl1271_ps_elp_sleep(wl);
4902 mutex_unlock(&wl->mutex);
4906 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4907 struct survey_info *survey)
4909 struct ieee80211_conf *conf = &hw->conf;
4914 survey->channel = conf->chandef.chan;
4919 static int wl1271_allocate_sta(struct wl1271 *wl,
4920 struct wl12xx_vif *wlvif,
4921 struct ieee80211_sta *sta)
4923 struct wl1271_station *wl_sta;
4927 if (wl->active_sta_count >= wl->max_ap_stations) {
4928 wl1271_warning("could not allocate HLID - too many stations");
4932 wl_sta = (struct wl1271_station *)sta->drv_priv;
4933 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4935 wl1271_warning("could not allocate HLID - too many links");
4939 /* use the previous security seq, if this is a recovery/resume */
4940 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4942 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4943 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4944 wl->active_sta_count++;
4948 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4950 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4953 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4954 __clear_bit(hlid, &wl->ap_ps_map);
4955 __clear_bit(hlid, &wl->ap_fw_ps_map);
4958 * save the last used PN in the private part of ieee80211_sta,
4959 * in case of recovery/suspend
4961 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4963 wl12xx_free_link(wl, wlvif, &hlid);
4964 wl->active_sta_count--;
4967 * rearm the tx watchdog when the last STA is freed - give the FW a
4968 * chance to return STA-buffered packets before complaining.
4970 if (wl->active_sta_count == 0)
4971 wl12xx_rearm_tx_watchdog_locked(wl);
4974 static int wl12xx_sta_add(struct wl1271 *wl,
4975 struct wl12xx_vif *wlvif,
4976 struct ieee80211_sta *sta)
4978 struct wl1271_station *wl_sta;
4982 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4984 ret = wl1271_allocate_sta(wl, wlvif, sta);
4988 wl_sta = (struct wl1271_station *)sta->drv_priv;
4990 hlid = wl_sta->hlid;
4992 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4994 wl1271_free_sta(wl, wlvif, hlid);
4999 static int wl12xx_sta_remove(struct wl1271 *wl,
5000 struct wl12xx_vif *wlvif,
5001 struct ieee80211_sta *sta)
5003 struct wl1271_station *wl_sta;
5006 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5008 wl_sta = (struct wl1271_station *)sta->drv_priv;
5010 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5013 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5017 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5021 static void wlcore_roc_if_possible(struct wl1271 *wl,
5022 struct wl12xx_vif *wlvif)
5024 if (find_first_bit(wl->roc_map,
5025 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5028 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5031 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5035 * when wl_sta is NULL, we treat this call as if coming from a
5036 * pending auth reply.
5037 * wl->mutex must be taken and the FW must be awake when the call takes place.
5040 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5041 struct wl1271_station *wl_sta, bool in_conn)
5044 if (WARN_ON(wl_sta && wl_sta->in_connection))
5047 if (!wlvif->ap_pending_auth_reply &&
5048 !wlvif->inconn_count)
5049 wlcore_roc_if_possible(wl, wlvif);
5052 wl_sta->in_connection = true;
5053 wlvif->inconn_count++;
5055 wlvif->ap_pending_auth_reply = true;
5058 if (wl_sta && !wl_sta->in_connection)
5061 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5064 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5068 wl_sta->in_connection = false;
5069 wlvif->inconn_count--;
5071 wlvif->ap_pending_auth_reply = false;
5074 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5075 test_bit(wlvif->role_id, wl->roc_map))
5076 wl12xx_croc(wl, wlvif->role_id);
5080 static int wl12xx_update_sta_state(struct wl1271 *wl,
5081 struct wl12xx_vif *wlvif,
5082 struct ieee80211_sta *sta,
5083 enum ieee80211_sta_state old_state,
5084 enum ieee80211_sta_state new_state)
5086 struct wl1271_station *wl_sta;
5087 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5088 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5091 wl_sta = (struct wl1271_station *)sta->drv_priv;
5093 /* Add station (AP mode) */
5095 old_state == IEEE80211_STA_NOTEXIST &&
5096 new_state == IEEE80211_STA_NONE) {
5097 ret = wl12xx_sta_add(wl, wlvif, sta);
5101 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5104 /* Remove station (AP mode) */
5106 old_state == IEEE80211_STA_NONE &&
5107 new_state == IEEE80211_STA_NOTEXIST) {
5109 wl12xx_sta_remove(wl, wlvif, sta);
5111 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5114 /* Authorize station (AP mode) */
5116 new_state == IEEE80211_STA_AUTHORIZED) {
5117 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5121 /* reconfigure rates */
5122 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5126 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5131 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5134 /* Authorize station */
5136 new_state == IEEE80211_STA_AUTHORIZED) {
5137 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5138 ret = wl12xx_set_authorized(wl, wlvif);
5144 old_state == IEEE80211_STA_AUTHORIZED &&
5145 new_state == IEEE80211_STA_ASSOC) {
5146 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5147 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5150 /* save seq number on disassoc (suspend) */
5152 old_state == IEEE80211_STA_ASSOC &&
5153 new_state == IEEE80211_STA_AUTH) {
5154 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5155 wlvif->total_freed_pkts = 0;
5158 /* restore seq number on assoc (resume) */
5160 old_state == IEEE80211_STA_AUTH &&
5161 new_state == IEEE80211_STA_ASSOC) {
5162 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5165 /* clear ROCs on failure or authorization */
5167 (new_state == IEEE80211_STA_AUTHORIZED ||
5168 new_state == IEEE80211_STA_NOTEXIST)) {
5169 if (test_bit(wlvif->role_id, wl->roc_map))
5170 wl12xx_croc(wl, wlvif->role_id);
5174 old_state == IEEE80211_STA_NOTEXIST &&
5175 new_state == IEEE80211_STA_NONE) {
5176 if (find_first_bit(wl->roc_map,
5177 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5178 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5179 wl12xx_roc(wl, wlvif, wlvif->role_id,
5180 wlvif->band, wlvif->channel);
5186 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5187 struct ieee80211_vif *vif,
5188 struct ieee80211_sta *sta,
5189 enum ieee80211_sta_state old_state,
5190 enum ieee80211_sta_state new_state)
5192 struct wl1271 *wl = hw->priv;
5193 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5196 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5197 sta->aid, old_state, new_state);
5199 mutex_lock(&wl->mutex);
5201 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5206 ret = wl1271_ps_elp_wakeup(wl);
5210 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5212 wl1271_ps_elp_sleep(wl);
5214 mutex_unlock(&wl->mutex);
5215 if (new_state < old_state)
5220 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5221 struct ieee80211_vif *vif,
5222 struct ieee80211_ampdu_params *params)
5224 struct wl1271 *wl = hw->priv;
5225 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5227 u8 hlid, *ba_bitmap;
5228 struct ieee80211_sta *sta = params->sta;
5229 enum ieee80211_ampdu_mlme_action action = params->action;
5230 u16 tid = params->tid;
5231 u16 *ssn = &params->ssn;
5233 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5236 /* sanity check - the fields in the FW are only 8 bits wide */
5237 if (WARN_ON(tid > 0xFF))
5240 mutex_lock(&wl->mutex);
5242 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5247 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5248 hlid = wlvif->sta.hlid;
5249 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5250 struct wl1271_station *wl_sta;
5252 wl_sta = (struct wl1271_station *)sta->drv_priv;
5253 hlid = wl_sta->hlid;
5259 ba_bitmap = &wl->links[hlid].ba_bitmap;
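/*
 * Descriptive note: ba_bitmap tracks the active RX BA sessions on this
 * link, one bit per TID.  For example (illustrative values only), with
 * sessions open on TID 0 and TID 5 it holds BIT(0) | BIT(5) == 0x21.
 */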
5261 ret = wl1271_ps_elp_wakeup(wl);
5265 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5269 case IEEE80211_AMPDU_RX_START:
5270 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5275 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5277 wl1271_error("exceeded max RX BA sessions");
5281 if (*ba_bitmap & BIT(tid)) {
5283 wl1271_error("cannot enable RX BA session on active "
5288 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5291 *ba_bitmap |= BIT(tid);
5292 wl->ba_rx_session_count++;
5296 case IEEE80211_AMPDU_RX_STOP:
5297 if (!(*ba_bitmap & BIT(tid))) {
5299 * this happens on reconfig - so only output a debug
5300 * message for now, and don't fail the function.
5302 wl1271_debug(DEBUG_MAC80211,
5303 "no active RX BA session on tid: %d",
5309 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5312 *ba_bitmap &= ~BIT(tid);
5313 wl->ba_rx_session_count--;
5318 * BA initiator (TX) sessions are managed by the FW independently, so
5319 * all TX AMPDU actions below intentionally fall through to common handling.
5321 case IEEE80211_AMPDU_TX_START:
5322 case IEEE80211_AMPDU_TX_STOP_CONT:
5323 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5324 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5325 case IEEE80211_AMPDU_TX_OPERATIONAL:
5330 wl1271_error("Incorrect ampdu action id=%x\n", action);
5334 wl1271_ps_elp_sleep(wl);
5337 mutex_unlock(&wl->mutex);
5342 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5343 struct ieee80211_vif *vif,
5344 const struct cfg80211_bitrate_mask *mask)
5346 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5347 struct wl1271 *wl = hw->priv;
5350 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5351 mask->control[NL80211_BAND_2GHZ].legacy,
5352 mask->control[NL80211_BAND_5GHZ].legacy);
5354 mutex_lock(&wl->mutex);
5356 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5357 wlvif->bitrate_masks[i] =
5358 wl1271_tx_enabled_rates_get(wl,
5359 mask->control[i].legacy,
5362 if (unlikely(wl->state != WLCORE_STATE_ON))
5365 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5366 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5368 ret = wl1271_ps_elp_wakeup(wl);
5372 wl1271_set_band_rate(wl, wlvif);
5374 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5375 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5377 wl1271_ps_elp_sleep(wl);
5380 mutex_unlock(&wl->mutex);
5385 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5386 struct ieee80211_vif *vif,
5387 struct ieee80211_channel_switch *ch_switch)
5389 struct wl1271 *wl = hw->priv;
5390 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5393 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5395 wl1271_tx_flush(wl);
5397 mutex_lock(&wl->mutex);
5399 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5400 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5401 ieee80211_chswitch_done(vif, false);
5403 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5407 ret = wl1271_ps_elp_wakeup(wl);
5411 /* TODO: change mac80211 to pass vif as param */
5413 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5414 unsigned long delay_usec;
5416 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5420 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5422 /* indicate failure 5 seconds after channel switch time */
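/*
 * Illustrative arithmetic (example values only): with a beacon interval
 * of 100 TU (100 * 1024 usec) and a channel switch count of 10 beacons,
 * delay_usec comes to ~1.02 sec, so the failure indication is scheduled
 * roughly 6 seconds from now.
 */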
5423 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5425 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5426 usecs_to_jiffies(delay_usec) +
5427 msecs_to_jiffies(5000));
5431 wl1271_ps_elp_sleep(wl);
5434 mutex_unlock(&wl->mutex);
5437 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5438 struct wl12xx_vif *wlvif,
5441 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5442 struct sk_buff *beacon =
5443 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5448 return cfg80211_find_ie(eid,
5449 beacon->data + ieoffset,
5450 beacon->len - ieoffset);
5453 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5457 const struct ieee80211_channel_sw_ie *ie_csa;
5459 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5463 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5464 *csa_count = ie_csa->count;
5469 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5470 struct ieee80211_vif *vif,
5471 struct cfg80211_chan_def *chandef)
5473 struct wl1271 *wl = hw->priv;
5474 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5475 struct ieee80211_channel_switch ch_switch = {
5477 .chandef = *chandef,
5481 wl1271_debug(DEBUG_MAC80211,
5482 "mac80211 channel switch beacon (role %d)",
5485 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5487 wl1271_error("error getting beacon (for CSA counter)");
5491 mutex_lock(&wl->mutex);
5493 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5498 ret = wl1271_ps_elp_wakeup(wl);
5502 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5506 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5509 wl1271_ps_elp_sleep(wl);
5511 mutex_unlock(&wl->mutex);
5514 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5515 u32 queues, bool drop)
5517 struct wl1271 *wl = hw->priv;
5519 wl1271_tx_flush(wl);
5522 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5523 struct ieee80211_vif *vif,
5524 struct ieee80211_channel *chan,
5526 enum ieee80211_roc_type type)
5528 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529 struct wl1271 *wl = hw->priv;
5530 int channel, active_roc, ret = 0;
5532 channel = ieee80211_frequency_to_channel(chan->center_freq);
5534 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5535 channel, wlvif->role_id);
5537 mutex_lock(&wl->mutex);
5539 if (unlikely(wl->state != WLCORE_STATE_ON))
5542 /* return EBUSY if we can't ROC right now */
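/*
 * Descriptive note: only a single ROC is supported at a time; roc_map
 * holds one bit per role, so any set bit (or a pending wl->roc_vif)
 * means this request is rejected as busy, as the comment above says.
 */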
5543 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5544 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5545 wl1271_warning("active roc on role %d", active_roc);
5550 ret = wl1271_ps_elp_wakeup(wl);
5554 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5559 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5560 msecs_to_jiffies(duration));
5562 wl1271_ps_elp_sleep(wl);
5564 mutex_unlock(&wl->mutex);
5568 static int __wlcore_roc_completed(struct wl1271 *wl)
5570 struct wl12xx_vif *wlvif;
5573 /* already completed */
5574 if (unlikely(!wl->roc_vif))
5577 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5579 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5582 ret = wl12xx_stop_dev(wl, wlvif);
5591 static int wlcore_roc_completed(struct wl1271 *wl)
5595 wl1271_debug(DEBUG_MAC80211, "roc complete");
5597 mutex_lock(&wl->mutex);
5599 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5604 ret = wl1271_ps_elp_wakeup(wl);
5608 ret = __wlcore_roc_completed(wl);
5610 wl1271_ps_elp_sleep(wl);
5612 mutex_unlock(&wl->mutex);
5617 static void wlcore_roc_complete_work(struct work_struct *work)
5619 struct delayed_work *dwork;
5623 dwork = to_delayed_work(work);
5624 wl = container_of(dwork, struct wl1271, roc_complete_work);
5626 ret = wlcore_roc_completed(wl);
5628 ieee80211_remain_on_channel_expired(wl->hw);
5631 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5633 struct wl1271 *wl = hw->priv;
5635 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5638 wl1271_tx_flush(wl);
5641 * we can't just flush_work here, because it might deadlock
5642 * (as we might get called from the same workqueue)
5644 cancel_delayed_work_sync(&wl->roc_complete_work);
5645 wlcore_roc_completed(wl);
5650 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5651 struct ieee80211_vif *vif,
5652 struct ieee80211_sta *sta,
5655 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5657 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5659 if (!(changed & IEEE80211_RC_BW_CHANGED))
5662 /* this callback is atomic, so schedule a new work item instead */
5663 wlvif->rc_update_bw = sta->bandwidth;
5664 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5665 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5668 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5669 struct ieee80211_vif *vif,
5670 struct ieee80211_sta *sta,
5671 struct station_info *sinfo)
5673 struct wl1271 *wl = hw->priv;
5674 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5678 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5680 mutex_lock(&wl->mutex);
5682 if (unlikely(wl->state != WLCORE_STATE_ON))
5685 ret = wl1271_ps_elp_wakeup(wl);
5689 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5693 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5694 sinfo->signal = rssi_dbm;
5697 wl1271_ps_elp_sleep(wl);
5700 mutex_unlock(&wl->mutex);
5703 static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta)
5705 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5706 struct wl1271 *wl = wl_sta->wl;
5707 u8 hlid = wl_sta->hlid;
5709 /* return in units of Kbps */
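/*
 * For example (illustrative only), a FW-reported rate of 65 Mbps is
 * returned as 65000 Kbps.
 */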
5710 return (wl->links[hlid].fw_rate_mbps * 1000);
5713 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5715 struct wl1271 *wl = hw->priv;
5718 mutex_lock(&wl->mutex);
5720 if (unlikely(wl->state != WLCORE_STATE_ON))
5723 /* packets are considered pending if in the TX queue or the FW */
5724 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5726 mutex_unlock(&wl->mutex);
5731 /* can't be const, mac80211 writes to this */
5732 static struct ieee80211_rate wl1271_rates[] = {
5734 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5735 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5737 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5738 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5739 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5741 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5742 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5743 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5745 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5746 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5747 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5749 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5750 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5752 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5753 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5755 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5756 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5758 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5759 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5761 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5762 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5764 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5765 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5767 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5768 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5770 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5771 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5774 /* can't be const, mac80211 writes to this */
5775 static struct ieee80211_channel wl1271_channels[] = {
5776 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5777 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5778 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5779 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5780 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5781 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5782 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5783 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5784 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5785 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5786 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5787 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5788 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5789 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5792 /* can't be const, mac80211 writes to this */
5793 static struct ieee80211_supported_band wl1271_band_2ghz = {
5794 .channels = wl1271_channels,
5795 .n_channels = ARRAY_SIZE(wl1271_channels),
5796 .bitrates = wl1271_rates,
5797 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5800 /* 5 GHz data rates for WL1273 */
5801 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5803 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5804 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5806 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5807 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5809 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5810 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5812 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5813 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5815 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5816 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5818 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5819 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5821 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5822 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5824 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5825 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5828 /* 5 GHz band channels for WL1273 */
5829 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5830 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5850 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5851 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5852 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5853 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5854 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5855 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5856 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5857 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5858 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5859 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5863 static struct ieee80211_supported_band wl1271_band_5ghz = {
5864 .channels = wl1271_channels_5ghz,
5865 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5866 .bitrates = wl1271_rates_5ghz,
5867 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5870 static const struct ieee80211_ops wl1271_ops = {
5871 .start = wl1271_op_start,
5872 .stop = wlcore_op_stop,
5873 .add_interface = wl1271_op_add_interface,
5874 .remove_interface = wl1271_op_remove_interface,
5875 .change_interface = wl12xx_op_change_interface,
5877 .suspend = wl1271_op_suspend,
5878 .resume = wl1271_op_resume,
5880 .config = wl1271_op_config,
5881 .prepare_multicast = wl1271_op_prepare_multicast,
5882 .configure_filter = wl1271_op_configure_filter,
5884 .set_key = wlcore_op_set_key,
5885 .hw_scan = wl1271_op_hw_scan,
5886 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5887 .sched_scan_start = wl1271_op_sched_scan_start,
5888 .sched_scan_stop = wl1271_op_sched_scan_stop,
5889 .bss_info_changed = wl1271_op_bss_info_changed,
5890 .set_frag_threshold = wl1271_op_set_frag_threshold,
5891 .set_rts_threshold = wl1271_op_set_rts_threshold,
5892 .conf_tx = wl1271_op_conf_tx,
5893 .get_tsf = wl1271_op_get_tsf,
5894 .get_survey = wl1271_op_get_survey,
5895 .sta_state = wl12xx_op_sta_state,
5896 .ampdu_action = wl1271_op_ampdu_action,
5897 .tx_frames_pending = wl1271_tx_frames_pending,
5898 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5899 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5900 .channel_switch = wl12xx_op_channel_switch,
5901 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5902 .flush = wlcore_op_flush,
5903 .remain_on_channel = wlcore_op_remain_on_channel,
5904 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5905 .add_chanctx = wlcore_op_add_chanctx,
5906 .remove_chanctx = wlcore_op_remove_chanctx,
5907 .change_chanctx = wlcore_op_change_chanctx,
5908 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5909 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5910 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5911 .sta_rc_update = wlcore_op_sta_rc_update,
5912 .sta_statistics = wlcore_op_sta_statistics,
5913 .get_expected_throughput = wlcore_op_get_expected_throughput,
5914 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5918 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5924 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5925 wl1271_error("Illegal RX rate from HW: %d", rate);
5929 idx = wl->band_rate_to_idx[band][rate];
5930 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5931 wl1271_error("Unsupported RX rate from HW: %d", rate);
5938 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5942 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5945 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5946 wl1271_warning("NIC part of the MAC address wraps around!");
5948 for (i = 0; i < wl->num_mac_addr; i++) {
5949 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5950 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5951 wl->addresses[i].addr[2] = (u8) oui;
5952 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5953 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5954 wl->addresses[i].addr[5] = (u8) nic;
5958 /* we may be at most one address short */
5959 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
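/*
 * Illustrative example (values are arbitrary): with oui 0x001122 and
 * nic 0x334455, the derived addresses are 00:11:22:33:44:55,
 * 00:11:22:33:44:56, ... - the NIC part is bumped by one for each
 * additional address.  If one address is missing, the last slot reuses
 * the first address with the locally administered bit set, e.g.
 * 02:11:22:33:44:55.
 */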
5962 * copy the first address, turn on its LAA bit, and use it as the last address
5965 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5966 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5967 memcpy(&wl->addresses[idx], &wl->addresses[0],
5968 sizeof(wl->addresses[0]));
5970 wl->addresses[idx].addr[0] |= BIT(1);
5973 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5974 wl->hw->wiphy->addresses = wl->addresses;
5977 static int wl12xx_get_hw_info(struct wl1271 *wl)
5981 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5985 wl->fuse_oui_addr = 0;
5986 wl->fuse_nic_addr = 0;
5988 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5992 if (wl->ops->get_mac)
5993 ret = wl->ops->get_mac(wl);
5999 static int wl1271_register_hw(struct wl1271 *wl)
6002 u32 oui_addr = 0, nic_addr = 0;
6004 if (wl->mac80211_registered)
6007 if (wl->nvs_len >= 12) {
6008 /* NOTE: The wl->nvs->nvs element must come first; to simplify
6009 * the casting, we assume it is at the beginning of the wl->nvs
6010 * structure.
6012 u8 *nvs_ptr = (u8 *)wl->nvs;
6015 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6017 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
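/*
 * Illustrative layout (example bytes only): an NVS with
 * nvs_ptr[3..6] = 55 44 33 22 and nvs_ptr[10..11] = 11 00 yields
 * oui_addr 0x001122 and nic_addr 0x334455.
 */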
6020 /* if the MAC address is zeroed in the NVS, derive it from the fuse */
6021 if (oui_addr == 0 && nic_addr == 0) {
6022 oui_addr = wl->fuse_oui_addr;
6023 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6024 nic_addr = wl->fuse_nic_addr + 1;
6027 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6029 ret = ieee80211_register_hw(wl->hw);
6031 wl1271_error("unable to register mac80211 hw: %d", ret);
6035 wl->mac80211_registered = true;
6037 wl1271_debugfs_init(wl);
6039 wl1271_notice("loaded");
6045 static void wl1271_unregister_hw(struct wl1271 *wl)
6048 wl1271_plt_stop(wl);
6050 ieee80211_unregister_hw(wl->hw);
6051 wl->mac80211_registered = false;
6055 static int wl1271_init_ieee80211(struct wl1271 *wl)
6058 static const u32 cipher_suites[] = {
6059 WLAN_CIPHER_SUITE_WEP40,
6060 WLAN_CIPHER_SUITE_WEP104,
6061 WLAN_CIPHER_SUITE_TKIP,
6062 WLAN_CIPHER_SUITE_CCMP,
6063 WL1271_CIPHER_SUITE_GEM,
6066 /* The tx descriptor buffer */
6067 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6069 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6070 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6073 /* FIXME: find a proper value */
6074 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6076 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6077 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6078 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6079 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6080 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6081 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6082 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6083 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6084 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6085 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6086 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6087 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6088 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6090 wl->hw->wiphy->cipher_suites = cipher_suites;
6091 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6093 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6094 BIT(NL80211_IFTYPE_AP) |
6095 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6096 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6097 #ifdef CONFIG_MAC80211_MESH
6098 BIT(NL80211_IFTYPE_MESH_POINT) |
6100 BIT(NL80211_IFTYPE_P2P_GO);
6102 wl->hw->wiphy->max_scan_ssids = 1;
6103 wl->hw->wiphy->max_sched_scan_ssids = 16;
6104 wl->hw->wiphy->max_match_sets = 16;
6106 * The maximum length of the IEs in a scanning probe request template
6107 * is the maximum possible template length, minus the template's
6108 * IEEE80211 header.
6110 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6111 sizeof(struct ieee80211_header);
6113 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6114 sizeof(struct ieee80211_header);
6116 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6118 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6119 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6120 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6121 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6123 /* make sure all our channels fit in the scanned_ch bitmask */
6124 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6125 ARRAY_SIZE(wl1271_channels_5ghz) >
6126 WL1271_MAX_CHANNELS);
6128 * clear channel flags from the previous usage
6129 * and restore max_power & max_antenna_gain values.
6131 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6132 wl1271_band_2ghz.channels[i].flags = 0;
6133 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6134 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6137 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6138 wl1271_band_5ghz.channels[i].flags = 0;
6139 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6140 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6144 * We keep local copies of the band structs because we need to
6145 * modify them on a per-device basis.
6147 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6148 sizeof(wl1271_band_2ghz));
6149 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6150 &wl->ht_cap[NL80211_BAND_2GHZ],
6151 sizeof(*wl->ht_cap));
6152 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6153 sizeof(wl1271_band_5ghz));
6154 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6155 &wl->ht_cap[NL80211_BAND_5GHZ],
6156 sizeof(*wl->ht_cap));
6158 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6159 &wl->bands[NL80211_BAND_2GHZ];
6160 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6161 &wl->bands[NL80211_BAND_5GHZ];
6164 * allow 4 queues per supported mac address, plus
6165 * 1 cab queue per mac, plus one global offchannel Tx queue
6167 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
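/*
 * Illustrative arithmetic (assuming, for example, NUM_TX_QUEUES == 4 and
 * WLCORE_NUM_MAC_ADDRESSES == 3): (4 + 1) * 3 + 1 == 16 hw queues.
 */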
6169 /* the last queue is the offchannel queue */
6170 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6171 wl->hw->max_rates = 1;
6173 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6175 /* the FW answers probe-requests in AP-mode */
6176 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6177 wl->hw->wiphy->probe_resp_offload =
6178 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6179 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6180 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6182 /* allowed interface combinations */
6183 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6184 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6186 /* register vendor commands */
6187 wlcore_set_vendor_commands(wl->hw->wiphy);
6189 SET_IEEE80211_DEV(wl->hw, wl->dev);
6191 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6192 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6194 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6199 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6202 struct ieee80211_hw *hw;
6207 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6209 wl1271_error("could not alloc ieee80211_hw");
6215 memset(wl, 0, sizeof(*wl));
6217 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6219 wl1271_error("could not alloc wl priv");
6221 goto err_priv_alloc;
6224 INIT_LIST_HEAD(&wl->wlvif_list);
6229 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6230 * we don't allocate any additional resource here, so that's fine.
6232 for (i = 0; i < NUM_TX_QUEUES; i++)
6233 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6234 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6236 skb_queue_head_init(&wl->deferred_rx_queue);
6237 skb_queue_head_init(&wl->deferred_tx_queue);
6239 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6240 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6241 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6242 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6243 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6244 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6245 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6247 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6248 if (!wl->freezable_wq) {
6255 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6256 wl->band = NL80211_BAND_2GHZ;
6257 wl->channel_type = NL80211_CHAN_NO_HT;
6259 wl->sg_enabled = true;
6260 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6261 wl->recovery_count = 0;
6264 wl->ap_fw_ps_map = 0;
6266 wl->system_hlid = WL12XX_SYSTEM_HLID;
6267 wl->active_sta_count = 0;
6268 wl->active_link_count = 0;
6271 /* The system link is always allocated */
6272 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6274 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6275 for (i = 0; i < wl->num_tx_desc; i++)
6276 wl->tx_frames[i] = NULL;
6278 spin_lock_init(&wl->wl_lock);
6280 wl->state = WLCORE_STATE_OFF;
6281 wl->fw_type = WL12XX_FW_TYPE_NONE;
6282 mutex_init(&wl->mutex);
6283 mutex_init(&wl->flush_mutex);
6284 init_completion(&wl->nvs_loading_complete);
6286 order = get_order(aggr_buf_size);
6287 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6288 if (!wl->aggr_buf) {
6292 wl->aggr_buf_size = aggr_buf_size;
6294 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6295 if (!wl->dummy_packet) {
6300 /* Allocate one page for the FW log */
6301 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6304 goto err_dummy_packet;
6307 wl->mbox_size = mbox_size;
6308 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6314 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6315 if (!wl->buffer_32) {
6326 free_page((unsigned long)wl->fwlog);
6329 dev_kfree_skb(wl->dummy_packet);
6332 free_pages((unsigned long)wl->aggr_buf, order);
6335 destroy_workqueue(wl->freezable_wq);
6338 wl1271_debugfs_exit(wl);
6342 ieee80211_free_hw(hw);
6346 return ERR_PTR(ret);
6348 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6350 int wlcore_free_hw(struct wl1271 *wl)
6352 /* Unblock any fwlog readers */
6353 mutex_lock(&wl->mutex);
6354 wl->fwlog_size = -1;
6355 mutex_unlock(&wl->mutex);
6357 wlcore_sysfs_free(wl);
6359 kfree(wl->buffer_32);
6361 free_page((unsigned long)wl->fwlog);
6362 dev_kfree_skb(wl->dummy_packet);
6363 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6365 wl1271_debugfs_exit(wl);
6369 wl->fw_type = WL12XX_FW_TYPE_NONE;
6373 kfree(wl->raw_fw_status);
6374 kfree(wl->fw_status);
6375 kfree(wl->tx_res_if);
6376 destroy_workqueue(wl->freezable_wq);
6379 ieee80211_free_hw(wl->hw);
6383 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6386 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6387 .flags = WIPHY_WOWLAN_ANY,
6388 .n_patterns = WL1271_MAX_RX_FILTERS,
6389 .pattern_min_len = 1,
6390 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6394 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6396 return IRQ_WAKE_THREAD;
6399 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6401 struct wl1271 *wl = context;
6402 struct platform_device *pdev = wl->pdev;
6403 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6404 struct resource *res;
6407 irq_handler_t hardirq_fn = NULL;
6410 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6412 wl1271_error("Could not allocate nvs data");
6415 wl->nvs_len = fw->size;
6417 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6423 ret = wl->ops->setup(wl);
6427 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6429 /* adjust some runtime configuration parameters */
6430 wlcore_adjust_conf(wl);
6432 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6434 wl1271_error("Could not get IRQ resource");
6438 wl->irq = res->start;
6439 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6440 wl->if_ops = pdev_data->if_ops;
6442 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6443 hardirq_fn = wlcore_hardirq;
6445 wl->irq_flags |= IRQF_ONESHOT;
6447 ret = wl12xx_set_power_on(wl);
6451 ret = wl12xx_get_hw_info(wl);
6453 wl1271_error("couldn't get hw info");
6454 wl1271_power_off(wl);
6458 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6459 wl->irq_flags, pdev->name, wl);
6461 wl1271_error("interrupt configuration failed");
6462 wl1271_power_off(wl);
6467 ret = enable_irq_wake(wl->irq);
6469 wl->irq_wake_enabled = true;
6470 device_init_wakeup(wl->dev, 1);
6471 if (pdev_data->pwr_in_suspend)
6472 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6475 disable_irq(wl->irq);
6476 wl1271_power_off(wl);
6478 ret = wl->ops->identify_chip(wl);
6482 ret = wl1271_init_ieee80211(wl);
6486 ret = wl1271_register_hw(wl);
6490 ret = wlcore_sysfs_init(wl);
6494 wl->initialized = true;
6498 wl1271_unregister_hw(wl);
6501 free_irq(wl->irq, wl);
6507 release_firmware(fw);
6508 complete_all(&wl->nvs_loading_complete);
6511 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6515 if (!wl->ops || !wl->ptable)
6518 wl->dev = &pdev->dev;
6520 platform_set_drvdata(pdev, wl);
6522 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6523 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6526 wl1271_error("request_firmware_nowait failed: %d", ret);
6527 complete_all(&wl->nvs_loading_complete);
6532 EXPORT_SYMBOL_GPL(wlcore_probe);
6534 int wlcore_remove(struct platform_device *pdev)
6536 struct wl1271 *wl = platform_get_drvdata(pdev);
6538 wait_for_completion(&wl->nvs_loading_complete);
6539 if (!wl->initialized)
6542 if (wl->irq_wake_enabled) {
6543 device_init_wakeup(wl->dev, 0);
6544 disable_irq_wake(wl->irq);
6546 wl1271_unregister_hw(wl);
6547 free_irq(wl->irq, wl);
6552 EXPORT_SYMBOL_GPL(wlcore_remove);
6554 u32 wl12xx_debug_level = DEBUG_NONE;
6555 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6556 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6557 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6559 module_param_named(fwlog, fwlog_param, charp, 0);
6560 MODULE_PARM_DESC(fwlog,
6561 "FW logger options: continuous, dbgpins or disable");
6563 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6564 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6566 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6567 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6569 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6570 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
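/*
 * Illustrative usage (parameter names as defined above; the exact
 * invocation depends on how the modules are packaged), for example:
 *
 *   modprobe wlcore debug_level=0x1 fwlog=continuous no_recovery=1
 *
 * debug_level is a bitmask of the driver's DEBUG_* flags; fwlog accepts
 * "continuous", "dbgpins" or "disable" as described above.
 */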
6572 MODULE_LICENSE("GPL");
6573 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6574 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6575 MODULE_FIRMWARE(WL12XX_NVS_NAME);