3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/*
 * Number of times to retry booting the firmware before giving up.
 * (The excerpt defined this macro twice with the same value; the
 * redundant duplicate definition has been removed.)
 */
#define WL1271_BOOT_RETRIES 3
/* Module parameters: fw-log mode string and recovery behavior knobs
 * (consumed by wlcore_adjust_conf() / wl1271_recovery_work() below). */
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
/* Forward declarations for helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that this STA link has completed association.
 * NOTE(review): excerpt elides some lines (locals/returns/braces);
 * comments describe only the visible statements.
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
/* Only meaningful for station-type roles. */
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
/* Must already be associated ... */
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* ... and only send the peer-state command once (atomic test-and-set). */
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
/* Association done - cancel any remain-on-channel for this role. */
86 wl12xx_croc(wl, wlvif->role_id);
88 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: walk the 5 GHz band and force
 * radar-flagged channels to passive scan / no IBSS.
 */
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
/* Skip channels the regulatory domain already disabled. */
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/*
 * Enable/disable FW RX streaming for a vif and mirror the result in the
 * WLVIF_FLAG_RX_STREAMING_STARTED flag. Caller must hold wl->mutex.
 */
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
 */
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
139 int period = wl->conf.rx_streaming.interval;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
145 /* reconfigure/disable according to new streaming_period */
/* Keep streaming only while associated and (always-on or soft gemini). */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming for a vif (if still applicable)
 * and arm a timer to turn it back off after the configured duration.
 */
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
167 mutex_lock(&wl->mutex);
/* Bail if already started, not associated, or streaming not wanted. */
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
175 if (!wl->conf.rx_streaming.interval)
/* Wake the chip from ELP before issuing commands. */
178 ret = wl1271_ps_elp_wakeup(wl);
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
191 wl1271_ps_elp_sleep(wl);
193 mutex_unlock(&wl->mutex);
/*
 * Deferred work: disable RX streaming for a vif (queued by the
 * rx_streaming inactivity timer below).
 */
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
203 mutex_lock(&wl->mutex);
/* Nothing to do if streaming was never started. */
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
208 ret = wl1271_ps_elp_wakeup(wl);
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
217 wl1271_ps_elp_sleep(wl);
219 mutex_unlock(&wl->mutex);
/*
 * Timer callback (old timer API: vif passed via 'data'): defer the
 * actual disable to process context via the disable work item.
 */
222 static void wl1271_rx_streaming_timer(unsigned long data)
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
229 /* wl->mutex must be taken */
/* Re-arm the TX watchdog only while blocks are allocated in the FW. */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
/* Cancel then re-queue, pushing the deadline forward. */
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires when no FW TX completion was seen for the
 * configured timeout. Legitimate stall causes (ROC, scan, AP with
 * sleeping stations) just re-arm the watchdog; otherwise start
 * full FW recovery.
 */
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 struct delayed_work *dwork;
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 mutex_lock(&wl->mutex);
251 if (unlikely(wl->state != WLCORE_STATE_ON))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
/*
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
 */
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
/*
270 * if a scan is in progress, we might not have any Tx for a long
 * time either - treat it like ROC above.
 */
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
/*
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
 */
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation left - declare the FW stuck. */
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
300 mutex_unlock(&wl->mutex);
/*
 * Apply the 'fwlog_param' module parameter (if set) to the firmware
 * log configuration. Unknown values are reported but otherwise ignored.
 */
303 static void wlcore_adjust_conf(struct wl1271 *wl)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
/* dbgpins implies continuous mode routed to the debug pins. */
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/*
 * Start or stop host-side (high-level) PS for one AP link based on the
 * FW PS bitmap and the number of packets queued for that link in FW.
 */
323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
324 struct wl12xx_vif *wlvif,
327 bool fw_ps, single_sta;
329 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
330 single_sta = (wl->active_sta_count == 1);
/*
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
 */
336 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
337 wl12xx_ps_link_end(wl, wlvif, hlid)
/*
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
 */
344 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
345 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Per-AP-vif IRQ bookkeeping: refresh the cached FW link-PS bitmap and,
 * for every connected station link, account freed packets and regulate
 * high-level PS via wl12xx_irq_ps_regulate_link().
 */
348 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
349 struct wl12xx_vif *wlvif,
350 struct wl_fw_status_2 *status)
352 struct wl1271_link *lnk;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
359 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
360 wl1271_debug(DEBUG_PSM,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl->ap_fw_ps_map, cur_fw_ps_map,
363 wl->ap_fw_ps_map ^ cur_fw_ps_map);
365 wl->ap_fw_ps_map = cur_fw_ps_map;
368 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
369 lnk = &wl->links[hlid];
/* Delta of freed packets since the last status read. */
370 cnt = status->counters.tx_lnk_free_pkts[hlid] -
371 lnk->prev_freed_pkts;
373 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
374 lnk->allocated_pkts -= cnt;
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 lnk->allocated_pkts);
/*
 * Read the FW status area from the chip and update all host-side TX
 * accounting: per-queue freed packets, freed/available blocks, the TX
 * watchdog, per-link AP state and the host-chipset time offset.
 * Caller must hold wl->mutex.
 */
381 static int wlcore_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
/* status_1 is variable-length (depends on num_rx_desc); status_2
 * follows it immediately in the same buffer. */
393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1->fw_rx_counter,
405 status_1->drv_rx_counter,
406 status_1->tx_results_counter);
408 for (i = 0; i < NUM_TX_QUEUES; i++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl->tx_allocated_pkts[i] -=
411 (status_2->counters.tx_released_pkts[i] -
412 wl->tx_pkts_freed[i]) & 0xff;
414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl->tx_blocks_freed <=
419 le32_to_cpu(status_2->total_released_blks)))
420 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: add 2^32 to get the true delta. */
423 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
424 le32_to_cpu(status_2->total_released_blks);
426 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
428 wl->tx_allocated_blocks -= freed_blocks;
/*
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
 */
436 if (wl->tx_allocated_blocks)
437 wl12xx_rearm_tx_watchdog_locked(wl);
439 cancel_delayed_work(&wl->tx_watchdog_work);
442 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
/*
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
 */
452 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl->tx_blocks_available > old_tx_blk_count)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl, wlvif) {
461 wl12xx_irq_update_links_status(wl, wlvif, status_2);
464 /* update the host-chipset time offset */
466 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
467 (s64)le32_to_cpu(status_2->fw_localtime);
/*
 * Drain both deferred skb queues into mac80211 (RX frames and TX
 * status reports). Uses the *_ni variants - must run in process context.
 */
472 static void wl1271_flush_deferred_work(struct wl1271 *wl)
476 /* Pass all received frames to the network stack */
477 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
478 ieee80211_rx_ni(wl->hw, skb);
480 /* Return sent skbs to the network stack */
481 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
482 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item: repeatedly flush the deferred queues until the RX queue
 * stays empty (new frames may be deferred while we flush).
 */
485 static void wl1271_netstack_work(struct work_struct *work)
488 container_of(work, struct wl1271, netstack_work);
491 wl1271_flush_deferred_work(wl);
492 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ servicing iterations per invocation. */
495 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop; called with wl->mutex held. Reads the
 * FW status, then dispatches on the pending interrupt bits: watchdogs
 * trigger recovery, DATA drives RX/TX processing, EVENT_A/B dispatch
 * FW events. Loops until no work remains or the loop budget runs out.
 */
497 static int wlcore_irq_locked(struct wl1271 *wl)
501 int loopcount = WL1271_IRQ_MAX_LOOPS;
503 unsigned int defer_count;
/*
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
 */
510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
513 wl1271_debug(DEBUG_IRQ, "IRQ work");
515 if (unlikely(wl->state != WLCORE_STATE_ON))
518 ret = wl1271_ps_elp_wakeup(wl);
522 while (!done && loopcount--) {
/*
524 * In order to avoid a race with the hardirq, clear the flag
525 * before acknowledging the chip. Since the mutex is held,
526 * wl1271_ps_elp_wakeup cannot be called concurrently.
 */
528 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
529 smp_mb__after_clear_bit();
531 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
535 wlcore_hw_tx_immediate_compl(wl);
537 intr = le32_to_cpu(wl->fw_status_1->intr);
538 intr &= WLCORE_ALL_INTR_MASK;
544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl->watchdog_recovery = true;
549 /* restarting the chip. ignore any other interrupt. */
553 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
554 wl1271_error("SW watchdog interrupt received! "
555 "starting recovery.");
556 wl->watchdog_recovery = true;
559 /* restarting the chip. ignore any other interrupt. */
563 if (likely(intr & WL1271_ACX_INTR_DATA)) {
564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
566 ret = wlcore_rx(wl, wl->fw_status_1);
570 /* Check if any tx blocks were freed */
571 spin_lock_irqsave(&wl->wl_lock, flags);
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
573 wl1271_tx_total_queue_count(wl) > 0) {
574 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
576 * In order to avoid starvation of the TX path,
577 * call the work function directly.
 */
579 ret = wlcore_tx_work_locked(wl);
583 spin_unlock_irqrestore(&wl->wl_lock, flags);
586 /* check for tx results */
587 ret = wlcore_hw_tx_delayed_compl(wl);
591 /* Make sure the deferred queues don't get too long */
592 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
593 skb_queue_len(&wl->deferred_rx_queue);
594 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
595 wl1271_flush_deferred_work(wl);
598 if (intr & WL1271_ACX_INTR_EVENT_A) {
599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
600 ret = wl1271_event_handle(wl, 0);
605 if (intr & WL1271_ACX_INTR_EVENT_B) {
606 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
607 ret = wl1271_event_handle(wl, 1);
612 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
613 wl1271_debug(DEBUG_IRQ,
614 "WL1271_ACX_INTR_INIT_COMPLETE");
616 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
620 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: takes wl->mutex, services the chip via
 * wlcore_irq_locked(), queues recovery on failure and re-queues TX
 * work if packets remained unhandled.
 */
626 static irqreturn_t wlcore_irq(int irq, void *cookie)
630 struct wl1271 *wl = cookie;
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
634 cancel_work_sync(&wl->tx_work);
636 mutex_lock(&wl->mutex);
638 ret = wlcore_irq_locked(wl);
640 wl12xx_queue_recovery_work(wl);
642 spin_lock_irqsave(&wl->wl_lock, flags);
643 /* In case TX was not handled here, queue TX work */
644 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
646 wl1271_tx_total_queue_count(wl) > 0)
647 ieee80211_queue_work(wl->hw, &wl->tx_work);
648 spin_unlock_irqrestore(&wl->wl_lock, flags);
650 mutex_unlock(&wl->mutex);
/* Accumulator for the active-interface iteration below; tracks the
 * "current" vif and whether it is among the running interfaces. */
655 struct vif_counter_data {
658 struct ieee80211_vif *cur_vif;
659 bool cur_vif_running;
/* Iterator callback: mark when the vif being counted is cur_vif. */
662 static void wl12xx_vif_count_iter(void *data, u8 *mac,
663 struct ieee80211_vif *vif)
665 struct vif_counter_data *counter = data;
668 if (counter->cur_vif == vif)
669 counter->cur_vif_running = true;
672 /* caller must not hold wl->mutex, as it might deadlock */
/* Count active interfaces via mac80211's iterator, noting cur_vif. */
673 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
674 struct ieee80211_vif *cur_vif,
675 struct vif_counter_data *data)
677 memset(data, 0, sizeof(*data));
678 data->cur_vif = cur_vif;
680 ieee80211_iterate_active_interfaces(hw,
681 wl12xx_vif_count_iter, data);
/*
 * Select and load the firmware image (PLT, multi-role or single-role),
 * copying it into a vmalloc'ed buffer at wl->fw. Skips the load when
 * the requested type is already cached.
 */
684 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
686 const struct firmware *fw;
688 enum wl12xx_fw_type fw_type;
692 fw_type = WL12XX_FW_TYPE_PLT;
693 fw_name = wl->plt_fw_name;
/*
696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value
 */
699 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
700 fw_type = WL12XX_FW_TYPE_MULTI;
701 fw_name = wl->mr_fw_name;
703 fw_type = WL12XX_FW_TYPE_NORMAL;
704 fw_name = wl->sr_fw_name;
/* Already have this image cached - nothing to do. */
708 if (wl->fw_type == fw_type)
711 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
713 ret = request_firmware(&fw, fw_name, wl->dev);
716 wl1271_error("could not get firmware %s: %d", fw_name, ret);
721 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate the cached type until the copy below succeeds. */
728 wl->fw_type = WL12XX_FW_TYPE_NONE;
729 wl->fw_len = fw->size;
730 wl->fw = vmalloc(wl->fw_len);
733 wl1271_error("could not allocate memory for the firmware");
738 memcpy(wl->fw, fw->data, wl->fw_len);
740 wl->fw_type = fw_type;
742 release_firmware(fw);
/*
 * Kick off FW recovery: flip the state to RESTARTING (guarding against
 * recursion), disable interrupts and queue the recovery work item.
 */
747 void wl12xx_queue_recovery_work(struct wl1271 *wl)
749 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
751 /* Avoid a recursive recovery */
752 if (wl->state == WLCORE_STATE_ON) {
753 wl->state = WLCORE_STATE_RESTARTING;
754 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
755 wlcore_disable_interrupts_nosync(wl);
756 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one memory block of FW log data to wl->fwlog. The block is a
 * list of length-prefixed records; scanning stops at a zero length or
 * when a record would overrun maxlen. The copy is clamped so fwlog
 * never exceeds PAGE_SIZE. Returns the number of bytes copied.
 */
760 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
764 /* The FW log is a length-value list, find where the log end */
765 while (len < maxlen) {
766 if (memblock[len] == 0)
768 if (len + memblock[len] + 1 > maxlen)
770 len += memblock[len] + 1;
773 /* Make sure we have enough room */
774 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
776 /* Fill the FW log file, consumed by the sysfs fwlog entry */
777 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
778 wl->fwlog_size += len;
/* Sentinel "next block" address marking the end of the continuous log. */
783 #define WLCORE_FW_LOG_END 0x2000000
/*
 * On a FW panic, walk the FW log's linked list of memory blocks on the
 * chip and copy their contents into wl->fwlog for the sysfs consumer,
 * then wake any reader blocked on fwlog_waitq.
 */
785 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/* Nothing to read if the fwlog feature is absent or disabled. */
793 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
794 (wl->conf.fwlog.mem_blocks == 0))
797 wl1271_info("Reading FW panic log")
799 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
/*
804 * Make sure the chip is awake and the logger isn't active.
805 * Do not send a stop fwlog command if the fw is hanged.
 */
807 if (wl1271_ps_elp_wakeup(wl))
809 if (!wl->watchdog_recovery)
810 wl12xx_cmd_stop_fwlog(wl);
812 /* Read the first memory block address */
813 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
817 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* In continuous mode each block also carries an RX descriptor header
 * that must be skipped, and the chain ends at WLCORE_FW_LOG_END. */
821 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
822 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
823 end_of_log = WLCORE_FW_LOG_END;
825 offset = sizeof(addr);
829 /* Traverse the memory blocks linked list */
831 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
832 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
/*
838 * Memory blocks are linked to one another. The first 4 bytes
839 * of each memory block hold the hardware address of the next
840 * one. The last memory block points to the first one in
841 * on demand mode and is equal to 0x2000000 in continuous mode.
 */
843 addr = le32_to_cpup((__le32 *)block);
844 if (!wl12xx_copy_fwlog(wl, block + offset,
845 WL12XX_HW_BLOCK_SIZE - offset))
847 } while (addr && (addr != end_of_log));
849 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log diagnostic state for a recovery: FW version, FW program counter
 * and interrupt status. Temporarily switches to the BOOT partition to
 * read the registers, then restores the WORK partition.
 */
855 static void wlcore_print_recovery(struct wl1271 *wl)
861 wl1271_info("Hardware recovery in progress. FW ver: %s",
862 wl->chip.fw_ver_str);
864 /* change partitions momentarily so we can read the FW pc */
865 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
869 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
873 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
877 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
879 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Full FW recovery work item: dump panic state, honor the
 * bug_on_recovery/no_recovery module parameters, bump TX security
 * sequence numbers, tear down all interfaces, stop the chip and ask
 * mac80211 to restart the hardware.
 */
883 static void wl1271_recovery_work(struct work_struct *work)
886 container_of(work, struct wl1271, recovery_work);
887 struct wl12xx_vif *wlvif;
888 struct ieee80211_vif *vif;
890 mutex_lock(&wl->mutex);
892 if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* Unintended recovery: capture the FW panic log and PC first. */
895 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
896 wl12xx_read_fwlog_panic(wl);
897 wlcore_print_recovery(wl);
900 BUG_ON(bug_on_recovery &&
901 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
904 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
/*
909 * Advance security sequence number to overcome potential progress
910 * in the firmware during recovery. This doens't hurt if the network is
 * not encrypted.
 */
913 wl12xx_for_each_wlvif(wl, wlvif) {
914 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
915 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
916 wlvif->tx_security_seq +=
917 WL1271_TX_SQN_POST_RECOVERY_PADDING;
920 /* Prevent spurious TX during FW restart */
921 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
923 if (wl->sched_scanning) {
924 ieee80211_sched_scan_stopped(wl->hw);
925 wl->sched_scanning = false;
928 /* reboot the chipset */
929 while (!list_empty(&wl->wlvif_list)) {
930 wlvif = list_first_entry(&wl->wlvif_list,
931 struct wl12xx_vif, list);
932 vif = wl12xx_wlvif_to_vif(wlvif);
933 __wl1271_op_remove_interface(wl, vif, false);
936 wlcore_op_stop_locked(wl);
938 ieee80211_restart_hw(wl->hw);
/*
941 * Its safe to enable TX now - the queues are stopped after a request
 * to restart the hardware.
 */
944 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
947 wl->watchdog_recovery = false;
948 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
949 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
952 static int wlcore_fw_wakeup(struct wl1271 *wl)
954 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffer (status_1 + status_2 + chip-private
 * area in one allocation, status_2 aliased into it) and the TX result
 * interface buffer. Frees status_1 if the second allocation fails.
 */
957 static int wl1271_setup(struct wl1271 *wl)
959 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
960 sizeof(*wl->fw_status_2) +
961 wl->fw_status_priv_len, GFP_KERNEL);
962 if (!wl->fw_status_1)
/* status_2 lives directly after the variable-length status_1. */
965 wl->fw_status_2 = (struct wl_fw_status_2 *)
966 (((u8 *) wl->fw_status_1) +
967 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
969 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
970 if (!wl->tx_res_if) {
971 kfree(wl->fw_status_1);
/*
 * Power the chip on (with the required pre/post delays), select the
 * BOOT partition and wake the FW out of ELP. Powers back off on error.
 */
978 static int wl12xx_set_power_on(struct wl1271 *wl)
982 msleep(WL1271_PRE_POWER_ON_SLEEP);
983 ret = wl1271_power_on(wl);
986 msleep(WL1271_POWER_ON_SLEEP);
990 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
994 /* ELP module wake up */
995 ret = wlcore_fw_wakeup(wl);
/* Error path: undo the power-on. */
1003 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to boot: power on, set the bus block
 * size, allocate status buffers and fetch the right firmware image.
 */
1007 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1011 ret = wl12xx_set_power_on(wl);
/*
1016 * For wl127x based devices we could use the default block
1017 * size (512 bytes), but due to a bug in the sdio driver, we
1018 * need to set it explicitly after the chip is powered on. To
1019 * simplify the code and since the performance impact is
1020 * negligible, we use the same block size for all different
 * bus types.
 *
1023 * Check if the bus supports blocksize alignment and, if it
1024 * doesn't, make sure we don't have the quirk.
 */
1026 if (!wl1271_set_block_size(wl))
1027 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1029 /* TODO: make sure the lower driver has set things up correctly */
1031 ret = wl1271_setup(wl);
1035 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (Production Line Test) mode: requires the OFF state, then
 * retries chip wakeup + plt_init up to WL1271_BOOT_RETRIES times,
 * updating wiphy version info on success.
 */
1043 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1045 int retries = WL1271_BOOT_RETRIES;
1046 struct wiphy *wiphy = wl->hw->wiphy;
1048 static const char* const PLT_MODE[] = {
1056 mutex_lock(&wl->mutex);
1058 wl1271_notice("power up");
1060 if (wl->state != WLCORE_STATE_OFF) {
1061 wl1271_error("cannot go into PLT state because not "
1062 "in off state: %d", wl->state);
1067 /* Indicate to lower levels that we are now in PLT mode */
1069 wl->plt_mode = plt_mode;
1073 ret = wl12xx_chip_wakeup(wl, true);
1077 ret = wl->ops->plt_init(wl);
1081 wl->state = WLCORE_STATE_ON;
1082 wl1271_notice("firmware booted in PLT mode %s (%s)",
1084 wl->chip.fw_ver_str);
1086 /* update hw/fw version info in wiphy struct */
1087 wiphy->hw_version = wl->chip.id;
1088 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1089 sizeof(wiphy->fw_version));
/* Retry path: power down and loop; give up after all retries. */
1094 wl1271_power_off(wl);
1098 wl->plt_mode = PLT_OFF;
1100 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1101 WL1271_BOOT_RETRIES);
1103 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts, flush/cancel all pending work,
 * power the chip off and reset the state to OFF.
 */
1108 int wl1271_plt_stop(struct wl1271 *wl)
1112 wl1271_notice("power down");
/*
1115 * Interrupts must be disabled before setting the state to OFF.
1116 * Otherwise, the interrupt handler might be called and exit without
1117 * reading the interrupt status.
 */
1119 wlcore_disable_interrupts(wl);
1120 mutex_lock(&wl->mutex);
1122 mutex_unlock(&wl->mutex);
/*
1125 * This will not necessarily enable interrupts as interrupts
1126 * may have been disabled when op_stop was called. It will,
1127 * however, balance the above call to disable_interrupts().
 */
1129 wlcore_enable_interrupts(wl);
1131 wl1271_error("cannot power down because not in PLT "
1132 "state: %d", wl->state);
1137 mutex_unlock(&wl->mutex);
/* Flush deferred frames and cancel every outstanding work item
 * before powering off. */
1139 wl1271_flush_deferred_work(wl);
1140 cancel_work_sync(&wl->netstack_work);
1141 cancel_work_sync(&wl->recovery_work);
1142 cancel_delayed_work_sync(&wl->elp_work);
1143 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1144 cancel_delayed_work_sync(&wl->connection_loss_work);
1146 mutex_lock(&wl->mutex);
1147 wl1271_power_off(wl);
1149 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1150 wl->state = WLCORE_STATE_OFF;
1152 wl->plt_mode = PLT_OFF;
1154 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point: map the skb to a hw link (hlid) and AC
 * queue, drop it if the link/queue is unusable, otherwise enqueue it
 * and apply watermark-based flow control, finally kicking tx_work.
 */
1160 static void wl1271_op_tx(struct ieee80211_hw *hw,
1161 struct ieee80211_tx_control *control,
1162 struct sk_buff *skb)
1164 struct wl1271 *wl = hw->priv;
1165 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1166 struct ieee80211_vif *vif = info->control.vif;
1167 struct wl12xx_vif *wlvif = NULL;
1168 unsigned long flags;
1173 wlvif = wl12xx_vif_to_data(vif);
1175 mapping = skb_get_queue_mapping(skb);
1176 q = wl1271_tx_get_queue(mapping);
1178 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1180 spin_lock_irqsave(&wl->wl_lock, flags);
/*
1183 * drop the packet if the link is invalid or the queue is stopped
1184 * for any reason but watermark. Watermark is a "soft"-stop so we
1185 * allow these packets through.
 */
1187 if (hlid == WL12XX_INVALID_LINK_ID ||
1188 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1189 (wlcore_is_queue_stopped(wl, q) &&
1190 !wlcore_is_queue_stopped_by_reason(wl, q,
1191 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1192 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1193 ieee80211_free_txskb(hw, skb);
1197 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1199 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1201 wl->tx_queue_count[q]++;
/*
1204 * The workqueue is slow to process the tx_queue and we need stop
1205 * the queue here, otherwise the queue will get too long.
 */
1207 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1208 !wlcore_is_queue_stopped_by_reason(wl, q,
1209 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1210 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1211 wlcore_stop_queue_locked(wl, q,
1212 WLCORE_QUEUE_STOP_REASON_WATERMARK);
/*
1216 * The chip specific setup must run before the first TX packet -
1217 * before that, the tx_work will not be initialized!
 */
1220 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1221 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1222 ieee80211_queue_work(wl->hw, &wl->tx_work);
1225 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet for the FW (used when the FW is
 * low on RX memory blocks). No-op if one is already pending; sends it
 * immediately when FW TX is not busy.
 */
1228 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1230 unsigned long flags;
1233 /* no need to queue a new dummy packet if one is already pending */
1234 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1237 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1239 spin_lock_irqsave(&wl->wl_lock, flags);
1240 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1241 wl->tx_queue_count[q]++;
1242 spin_unlock_irqrestore(&wl->wl_lock, flags);
1244 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1245 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1246 return wlcore_tx_work_locked(wl);
/*
1249 * If the FW TX is busy, TX work will be scheduled by the threaded
1250 * interrupt handler function
 */
/*
1256 * The size of the dummy packet should be at least 1400 bytes. However, in
1257 * order to minimize the number of bus transactions, aligning it to 512 bytes
1258 * boundaries could be beneficial, performance wise
 */
1260 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet: a zeroed QoS-less NULLFUNC data frame padded
 * to TOTAL_TX_DUMMY_PACKET_SIZE (minus the TX HW descriptor reserved
 * as headroom), tagged with the management TID.
 */
1262 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1264 struct sk_buff *skb;
1265 struct ieee80211_hdr_3addr *hdr;
1266 unsigned int dummy_packet_size;
1268 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1269 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1271 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1273 wl1271_warning("Failed to allocate a dummy packet skb");
/* Leave headroom for the TX HW descriptor prepended later. */
1277 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1279 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1280 memset(hdr, 0, sizeof(*hdr));
1281 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1282 IEEE80211_STYPE_NULLFUNC |
1283 IEEE80211_FCTL_TODS);
1285 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1287 /* Dummy packets require the TID to be management */
1288 skb->priority = WL1271_TID_MGMT;
1290 /* Initialize all fields that might be used */
1291 skb_set_queue_mapping(skb, 0);
1292 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN packet pattern can be expressed as FW RX
 * filter fields: a mask is required, and the number of fields and
 * their total encoded size must stay within FW limits.
 */
1300 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1302 int num_fields = 0, in_field = 0, fields_size = 0;
1303 int i, pattern_len = 0;
1306 wl1271_warning("No mask in WoWLAN pattern");
/*
1311 * The pattern is broken up into segments of bytes at different offsets
1312 * that need to be checked by the FW filter. Each segment is called
1313 * a field in the FW API. We verify that the total number of fields
1314 * required for this pattern won't exceed FW limits (8)
1315 * as well as the total fields buffer won't exceed the FW limit.
1316 * Note that if there's a pattern which crosses Ethernet/IP header
1317 * boundary a new field is required.
 */
1319 for (i = 0; i < p->pattern_len; i++) {
1320 if (test_bit(i, (unsigned long *)p->mask)) {
/* A field may not span the Ethernet/IP header boundary. */
1325 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1327 fields_size += pattern_len +
1328 RX_FILTER_FIELD_OVERHEAD;
1336 fields_size += pattern_len +
1337 RX_FILTER_FIELD_OVERHEAD;
/* Account for a trailing in-progress field, if any. */
1344 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1348 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1349 wl1271_warning("RX Filter too complex. Too many segments");
1353 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1354 wl1271_warning("RX filter pattern is too big");
/* Allocate a zero-initialized RX filter; free with wl1271_rx_filter_free(). */
1361 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1363 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including every per-field pattern buffer. */
1366 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1373 for (i = 0; i < filter->num_fields; i++)
1374 kfree(filter->fields[i].pattern);
/*
 * Append one match field (offset/flags/pattern) to an RX filter.
 * Fails when the filter already holds the FW maximum number of fields
 * or the pattern buffer cannot be allocated.
 */
1379 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1380 u16 offset, u8 flags,
1381 u8 *pattern, u8 len)
1383 struct wl12xx_rx_filter_field *field;
1385 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1386 wl1271_warning("Max fields per RX filter. can't alloc another");
1390 field = &filter->fields[filter->num_fields];
1392 field->pattern = kzalloc(len, GFP_KERNEL);
1393 if (!field->pattern) {
1394 wl1271_warning("Failed to allocate RX filter pattern");
/* Commit the new field only after its pattern buffer exists. */
1398 filter->num_fields++;
1400 field->offset = cpu_to_le16(offset);
1401 field->flags = flags;
1403 memcpy(field->pattern, pattern, len);
1408 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1410 int i, fields_size = 0;
1412 for (i = 0; i < filter->num_fields; i++)
1413 fields_size += filter->fields[i].len +
1414 sizeof(struct wl12xx_rx_filter_field) -
1420 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1424 struct wl12xx_rx_filter_field *field;
1426 for (i = 0; i < filter->num_fields; i++) {
1427 field = (struct wl12xx_rx_filter_field *)buf;
1429 field->offset = filter->fields[i].offset;
1430 field->flags = filter->fields[i].flags;
1431 field->len = filter->fields[i].len;
1433 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1434 buf += sizeof(struct wl12xx_rx_filter_field) -
1435 sizeof(u8 *) + field->len;
/*
 * Translate one validated cfg80211 WoWLAN pattern into a FW RX
 * filter. NOTE(review): this view of the function is truncated;
 * comments describe only the visible code.
 */
1440 * Allocates an RX filter returned through f
1441 * which needs to be freed using rx_filter_free()
1443 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1444 struct cfg80211_wowlan_trig_pkt_pattern *p,
1445 struct wl12xx_rx_filter **f)
1448 struct wl12xx_rx_filter *filter;
1452 filter = wl1271_rx_filter_alloc();
1454 wl1271_warning("Failed to alloc rx filter");
/* scan the mask: i marks the start of a run of set bits (segment) */
1460 while (i < p->pattern_len) {
1461 if (!test_bit(i, (unsigned long *)p->mask)) {
/* j runs to the end of the current contiguous set-bit segment */
1466 for (j = i; j < p->pattern_len; j++) {
1467 if (!test_bit(j, (unsigned long *)p->mask))
/* a segment crossing the Ethernet/IP boundary must be split */
1470 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1471 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* offsets below the boundary are Ethernet-header relative,
 * above it IP-header relative */
1475 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1477 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1479 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1480 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1485 ret = wl1271_rx_filter_alloc_field(filter,
1488 &p->pattern[i], len);
/* a matching packet should wake the host, not be dropped */
1495 filter->action = FILTER_SIGNAL;
/* error path: release the partially built filter */
1501 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters according to the WoWLAN configuration:
 * no/any-trigger config clears all filters; otherwise each pattern is
 * validated, converted and enabled, and the default action is set to
 * drop non-matching traffic. NOTE(review): truncated view; comments
 * describe only the visible code.
 */
1507 static int wl1271_configure_wowlan(struct wl1271 *wl,
1508 struct cfg80211_wowlan *wow)
/* no patterns (or wake-on-anything): fall back to signalling all
 * traffic and clear any previously installed filters */
1512 if (!wow || wow->any || !wow->n_patterns) {
1513 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1518 ret = wl1271_rx_filter_clear_all(wl);
1525 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1528 /* Validate all incoming patterns before clearing current FW state */
1529 for (i = 0; i < wow->n_patterns; i++) {
1530 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1532 wl1271_warning("Bad wowlan pattern %d", i);
1537 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1541 ret = wl1271_rx_filter_clear_all(wl);
1545 /* Translate WoWLAN patterns into filters */
1546 for (i = 0; i < wow->n_patterns; i++) {
1547 struct cfg80211_wowlan_trig_pkt_pattern *p;
1548 struct wl12xx_rx_filter *filter = NULL;
1550 p = &wow->patterns[i];
1552 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1554 wl1271_warning("Failed to create an RX filter from "
1555 "wowlan pattern %d", i);
1559 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the host-side filter is freed here */
1561 wl1271_rx_filter_free(filter);
/* with filters installed, drop everything that does not match */
1566 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA vif for host suspend: install WoWLAN RX
 * filters and, if configured differently for suspend, switch the FW
 * wake-up conditions/listen interval. NOTE(review): truncated view;
 * comments describe only the visible code.
 */
1572 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1573 struct wl12xx_vif *wlvif,
1574 struct cfg80211_wowlan *wow)
/* nothing to do for a STA that is not associated */
1578 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1581 ret = wl1271_ps_elp_wakeup(wl);
1585 ret = wl1271_configure_wowlan(wl, wow);
/* skip reprogramming when suspend settings equal runtime settings */
1589 if ((wl->conf.conn.suspend_wake_up_event ==
1590 wl->conf.conn.wake_up_event) &&
1591 (wl->conf.conn.suspend_listen_interval ==
1592 wl->conf.conn.listen_interval))
1595 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1596 wl->conf.conn.suspend_wake_up_event,
1597 wl->conf.conn.suspend_listen_interval);
1600 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1603 wl1271_ps_elp_sleep(wl);
1609 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1610 struct wl12xx_vif *wlvif)
1614 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1617 ret = wl1271_ps_elp_wakeup(wl);
1621 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1623 wl1271_ps_elp_sleep(wl);
1629 static int wl1271_configure_suspend(struct wl1271 *wl,
1630 struct wl12xx_vif *wlvif,
1631 struct cfg80211_wowlan *wow)
1633 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1634 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1635 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1636 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo the suspend-time configuration on resume: remove WoWLAN
 * filters, restore runtime wake-up conditions (STA) and disable
 * beacon filtering (AP). NOTE(review): truncated view; comments
 * describe only the visible code.
 */
1640 static void wl1271_configure_resume(struct wl1271 *wl,
1641 struct wl12xx_vif *wlvif)
1644 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1645 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* only STA and AP roles were configured on suspend */
1647 if ((!is_ap) && (!is_sta))
1650 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1653 ret = wl1271_ps_elp_wakeup(wl)
/* NULL wow clears all RX filters installed for suspend */
1658 wl1271_configure_wowlan(wl, NULL);
/* skip reprogramming when suspend settings equal runtime settings */
1660 if ((wl->conf.conn.suspend_wake_up_event ==
1661 wl->conf.conn.wake_up_event) &&
1662 (wl->conf.conn.suspend_listen_interval ==
1663 wl->conf.conn.listen_interval))
1666 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1667 wl->conf.conn.wake_up_event,
1668 wl->conf.conn.listen_interval);
1671 wl1271_error("resume: wake up conditions failed: %d",
/* AP: re-enable beacon delivery to the host */
1675 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1679 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback: configure every vif for WoWLAN, then
 * quiesce the driver (flush TX, drain works, block the threaded irq
 * via the SUSPENDED flag). NOTE(review): truncated view; comments
 * describe only the visible code.
 */
1682 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1683 struct cfg80211_wowlan *wow)
1685 struct wl1271 *wl = hw->priv;
1686 struct wl12xx_vif *wlvif;
1689 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1692 /* we want to perform the recovery before suspending */
1693 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1694 wl1271_warning("postponing suspend to perform recovery");
1698 wl1271_tx_flush(wl);
1700 mutex_lock(&wl->mutex);
1701 wl->wow_enabled = true;
1702 wl12xx_for_each_wlvif(wl, wlvif) {
1703 ret = wl1271_configure_suspend(wl, wlvif, wow);
1705 mutex_unlock(&wl->mutex);
1706 wl1271_warning("couldn't prepare device to suspend");
1710 mutex_unlock(&wl->mutex);
1711 /* flush any remaining work */
1712 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1715 * disable and re-enable interrupts in order to flush
1718 wlcore_disable_interrupts(wl);
1721 * set suspended flag to avoid triggering a new threaded_irq
1722 * work. no need for spinlock as interrupts are disabled.
1724 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1726 wlcore_enable_interrupts(wl);
1727 flush_work(&wl->tx_work);
1728 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 resume callback: re-enable irq work, run any irq work that
 * was postponed while suspended (unless a recovery is pending, in
 * which case the recovery is queued instead), then undo per-vif
 * suspend configuration. NOTE(review): truncated view; comments
 * describe only the visible code.
 */
1733 static int wl1271_op_resume(struct ieee80211_hw *hw)
1735 struct wl1271 *wl = hw->priv;
1736 struct wl12xx_vif *wlvif;
1737 unsigned long flags;
1738 bool run_irq_work = false, pending_recovery;
1741 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1743 WARN_ON(!wl->wow_enabled);
1746 * re-enable irq_work enqueuing, and call irq_work directly if
1747 * there is a pending work.
1749 spin_lock_irqsave(&wl->wl_lock, flags);
1750 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1751 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1752 run_irq_work = true;
1753 spin_unlock_irqrestore(&wl->wl_lock, flags);
1755 mutex_lock(&wl->mutex);
1757 /* test the recovery flag before calling any SDIO functions */
1758 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1762 wl1271_debug(DEBUG_MAC80211,
1763 "run postponed irq_work directly");
1765 /* don't talk to the HW if recovery is pending */
1766 if (!pending_recovery) {
1767 ret = wlcore_irq_locked(wl);
1769 wl12xx_queue_recovery_work(wl);
1772 wlcore_enable_interrupts(wl);
1775 if (pending_recovery) {
1776 wl1271_warning("queuing forgotten recovery on resume");
1777 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1781 wl12xx_for_each_wlvif(wl, wlvif) {
1782 wl1271_configure_resume(wl, wlvif);
1786 wl->wow_enabled = false;
1787 mutex_unlock(&wl->mutex);
1793 static int wl1271_op_start(struct ieee80211_hw *hw)
1795 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1798 * We have to delay the booting of the hardware because
1799 * we need to know the local MAC address before downloading and
1800 * initializing the firmware. The MAC address cannot be changed
1801 * after boot, and without the proper MAC address, the firmware
1802 * will not function properly.
1804 * The MAC address is first known when the corresponding interface
1805 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held: mark the state OFF, quiesce
 * interrupts and all works (temporarily dropping the mutex for the
 * sync cancels), power off, and reset all driver-global state.
 * NOTE(review): truncated view; comments describe only the visible
 * code.
 */
1811 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance the interrupt-disable depth if a
 * recovery had disabled interrupts */
1815 if (wl->state == WLCORE_STATE_OFF) {
1816 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1818 wlcore_enable_interrupts(wl);
1824 * this must be before the cancel_work calls below, so that the work
1825 * functions don't perform further work.
1827 wl->state = WLCORE_STATE_OFF;
1830 * Use the nosync variant to disable interrupts, so the mutex could be
1831 * held while doing so without deadlocking.
1833 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while synchronously cancelling works that may
 * themselves take it */
1835 mutex_unlock(&wl->mutex);
1837 wlcore_synchronize_interrupts(wl);
1838 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1839 cancel_work_sync(&wl->recovery_work);
1840 wl1271_flush_deferred_work(wl);
1841 cancel_delayed_work_sync(&wl->scan_complete_work);
1842 cancel_work_sync(&wl->netstack_work);
1843 cancel_work_sync(&wl->tx_work);
1844 cancel_delayed_work_sync(&wl->elp_work);
1845 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1846 cancel_delayed_work_sync(&wl->connection_loss_work);
1848 /* let's notify MAC80211 about the remaining pending TX frames */
1849 wl12xx_tx_reset(wl);
1850 mutex_lock(&wl->mutex);
1852 wl1271_power_off(wl);
1854 * In case a recovery was scheduled, interrupts were disabled to avoid
1855 * an interrupt storm. Now that the power is down, it is safe to
1856 * re-enable interrupts to balance the disable depth
1858 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1859 wlcore_enable_interrupts(wl);
/* reset global driver state to power-on defaults */
1861 wl->band = IEEE80211_BAND_2GHZ;
1864 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1865 wl->channel_type = NL80211_CHAN_NO_HT;
1866 wl->tx_blocks_available = 0;
1867 wl->tx_allocated_blocks = 0;
1868 wl->tx_results_count = 0;
1869 wl->tx_packets_count = 0;
1870 wl->time_offset = 0;
1871 wl->ap_fw_ps_map = 0;
1873 wl->sched_scanning = false;
1874 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1875 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1876 memset(wl->links_map, 0, sizeof(wl->links_map));
1877 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1878 wl->active_sta_count = 0;
1880 /* The system link is always allocated */
1881 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1884 * this is performed after the cancel_work calls and the associated
1885 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1886 * get executed before all these vars have been reset.
1890 wl->tx_blocks_freed = 0;
1892 for (i = 0; i < NUM_TX_QUEUES; i++) {
1893 wl->tx_pkts_freed[i] = 0;
1894 wl->tx_allocated_pkts[i] = 0;
1897 wl1271_debugfs_reset(wl);
/* fw_status_2 points into the fw_status_1 allocation, so only the
 * first pointer is kfree()d — TODO confirm against the allocator */
1899 kfree(wl->fw_status_1);
1900 wl->fw_status_1 = NULL;
1901 wl->fw_status_2 = NULL;
1902 kfree(wl->tx_res_if);
1903 wl->tx_res_if = NULL;
1904 kfree(wl->target_mem_map);
1905 wl->target_mem_map = NULL;
1908 static void wlcore_op_stop(struct ieee80211_hw *hw)
1910 struct wl1271 *wl = hw->priv;
1912 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1914 mutex_lock(&wl->mutex);
1916 wlcore_op_stop_locked(wl);
1918 mutex_unlock(&wl->mutex);
1921 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1923 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1924 WL12XX_MAX_RATE_POLICIES);
1925 if (policy >= WL12XX_MAX_RATE_POLICIES)
1928 __set_bit(policy, wl->rate_policies_map);
1933 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1935 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1938 __clear_bit(*idx, wl->rate_policies_map);
1939 *idx = WL12XX_MAX_RATE_POLICIES;
1942 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
1944 u8 policy = find_first_zero_bit(wl->klv_templates_map,
1945 WLCORE_MAX_KLV_TEMPLATES);
1946 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
1949 __set_bit(policy, wl->klv_templates_map);
1954 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
1956 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
1959 __clear_bit(*idx, wl->klv_templates_map);
1960 *idx = WLCORE_MAX_KLV_TEMPLATES;
1963 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1965 switch (wlvif->bss_type) {
1966 case BSS_TYPE_AP_BSS:
1968 return WL1271_ROLE_P2P_GO;
1970 return WL1271_ROLE_AP;
1972 case BSS_TYPE_STA_BSS:
1974 return WL1271_ROLE_P2P_CL;
1976 return WL1271_ROLE_STA;
1979 return WL1271_ROLE_IBSS;
1982 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1984 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the driver-private per-vif data when an interface is
 * added: derive the BSS type, allocate rate policies and KLV
 * templates per role, seed per-vif copies of global config, and set
 * up the vif's works/timer. NOTE(review): truncated view; comments
 * describe only the visible code.
 */
1987 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1989 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1992 /* clear everything but the persistent data */
1993 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
/* derive BSS type from the (p2p-aware) interface type */
1995 switch (ieee80211_vif_type_p2p(vif)) {
1996 case NL80211_IFTYPE_P2P_CLIENT:
1999 case NL80211_IFTYPE_STATION:
2000 wlvif->bss_type = BSS_TYPE_STA_BSS;
2002 case NL80211_IFTYPE_ADHOC:
2003 wlvif->bss_type = BSS_TYPE_IBSS;
2005 case NL80211_IFTYPE_P2P_GO:
2008 case NL80211_IFTYPE_AP:
2009 wlvif->bss_type = BSS_TYPE_AP_BSS;
2012 wlvif->bss_type = MAX_BSS_TYPE;
/* role/link ids start out invalid until the FW assigns them */
2016 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2017 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2018 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2020 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2021 wlvif->bss_type == BSS_TYPE_IBSS) {
2022 /* init sta/ibss data */
2023 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2024 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2025 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2026 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2027 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2028 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2029 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2030 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP role: broadcast/global links plus per-AC unicast policies */
2033 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2034 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2035 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2036 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2037 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2038 wl12xx_allocate_rate_policy(wl,
2039 &wlvif->ap.ucast_rate_idx[i]);
2040 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
2042 * TODO: check if basic_rate shouldn't be
2043 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2044 * instead (the same thing for STA above).
2046 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
2047 /* TODO: this seems to be used only for STA, check it */
2048 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
2051 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2052 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2053 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2056 * mac80211 configures some values globally, while we treat them
2057 * per-interface. thus, on init, we have to copy them from wl
2059 wlvif->band = wl->band;
2060 wlvif->channel = wl->channel;
2061 wlvif->power_level = wl->power_level;
2062 wlvif->channel_type = wl->channel_type;
2064 INIT_WORK(&wlvif->rx_streaming_enable_work,
2065 wl1271_rx_streaming_enable_work);
2066 INIT_WORK(&wlvif->rx_streaming_disable_work,
2067 wl1271_rx_streaming_disable_work);
2068 INIT_LIST_HEAD(&wlvif->list);
2070 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2071 (unsigned long) wlvif);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES
 * times, then publish hw/fw version info and transition the driver
 * state to ON. Returns whether the boot succeeded. NOTE(review):
 * truncated view; comments describe only the visible code.
 */
2075 static bool wl12xx_init_fw(struct wl1271 *wl)
2077 int retries = WL1271_BOOT_RETRIES;
2078 bool booted = false;
2079 struct wiphy *wiphy = wl->hw->wiphy;
/* boot sequence: power/wake the chip, load fw, init hw state */
2084 ret = wl12xx_chip_wakeup(wl, false);
2088 ret = wl->ops->boot(wl);
2092 ret = wl1271_hw_init(wl);
/* failure path: drain pending irq/netstack work before retrying */
2100 mutex_unlock(&wl->mutex);
2101 /* Unlocking the mutex in the middle of handling is
2102 inherently unsafe. In this case we deem it safe to do,
2103 because we need to let any possibly pending IRQ out of
2104 the system (and while we are WLCORE_STATE_OFF the IRQ
2105 work function will not do anything.) Also, any other
2106 possible concurrent operations will fail due to the
2107 current state, hence the wl1271 struct should be safe. */
2108 wlcore_disable_interrupts(wl);
2109 wl1271_flush_deferred_work(wl);
2110 cancel_work_sync(&wl->netstack_work);
2111 mutex_lock(&wl->mutex);
2113 wl1271_power_off(wl);
2117 wl1271_error("firmware boot failed despite %d retries",
2118 WL1271_BOOT_RETRIES);
2122 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2124 /* update hw/fw version info in wiphy struct */
2125 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy may leave fw_version unterminated if
 * fw_ver_str fills the buffer — TODO confirm buffer sizes */
2126 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2127 sizeof(wiphy->fw_version));
2130 * Now we know if 11a is supported (info from the NVS), so disable
2131 * 11a channels if not supported
2133 if (!wl->enable_11a)
2134 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2136 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2137 wl->enable_11a ? "" : "not ");
2139 wl->state = WLCORE_STATE_ON;
2144 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2146 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2150 * Check whether a fw switch (i.e. moving from one loaded
2151 * fw to another) is needed. This function is also responsible
2152 * for updating wl->last_vif_count, so it must be called before
2153 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2156 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2157 struct vif_counter_data vif_counter_data,
2160 enum wl12xx_fw_type current_fw = wl->fw_type;
2161 u8 vif_count = vif_counter_data.counter;
2163 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2166 /* increase the vif count if this is a new vif */
2167 if (add && !vif_counter_data.cur_vif_running)
2170 wl->last_vif_count = vif_count;
2172 /* no need for fw change if the device is OFF */
2173 if (wl->state == WLCORE_STATE_OFF)
2176 /* no need for fw change if a single fw is used */
2177 if (!wl->mr_fw_name)
2180 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2182 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2189 * Enter "forced psm". Make sure the sta is in psm against the ap,
2190 * to make the fw switch a bit more disconnection-persistent.
2192 static void wl12xx_force_active_psm(struct wl1271 *wl)
2194 struct wl12xx_vif *wlvif;
2196 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2197 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * mac80211 add_interface callback: initialize per-vif data, boot the
 * fw if this is the first interface (possibly switching single/multi
 * role fw via an intended recovery), enable the FW role and register
 * the vif in the driver list. NOTE(review): truncated view; comments
 * describe only the visible code.
 */
2201 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2202 struct ieee80211_vif *vif)
2204 struct wl1271 *wl = hw->priv;
2205 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2206 struct vif_counter_data vif_count;
2209 bool booted = false;
/* beacon filtering / CQM RSSI are handled by the device */
2211 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2212 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2214 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2215 ieee80211_vif_type_p2p(vif), vif->addr);
2217 wl12xx_get_vif_count(hw, vif, &vif_count);
2219 mutex_lock(&wl->mutex);
2220 ret = wl1271_ps_elp_wakeup(wl);
2225 * in some very corner case HW recovery scenarios its possible to
2226 * get here before __wl1271_op_remove_interface is complete, so
2227 * opt out if that is the case.
2229 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2230 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2236 ret = wl12xx_init_vif_data(wl, vif);
2241 role_type = wl12xx_get_role_type(wl, wlvif);
2242 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
/* switching single-role <-> multi-role fw requires a restart,
 * done synchronously via an intended recovery */
2247 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2248 wl12xx_force_active_psm(wl);
2249 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2250 mutex_unlock(&wl->mutex);
2251 wl1271_recovery_work(&wl->recovery_work);
2256 * TODO: after the nvs issue will be solved, move this block
2257 * to start(), and make sure here the driver is ON.
2259 if (wl->state == WLCORE_STATE_OFF) {
2261 * we still need this in order to configure the fw
2262 * while uploading the nvs
2264 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2266 booted = wl12xx_init_fw(wl);
2273 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2274 role_type, &wlvif->role_id);
2278 ret = wl1271_init_vif_specific(wl, vif);
2282 list_add(&wlvif->list, &wl->wlvif_list);
2283 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2285 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2290 wl1271_ps_elp_sleep(wl);
2292 mutex_unlock(&wl->mutex);
/*
 * Tear down one vif with wl->mutex held: idle any scan it owns,
 * disable its FW roles (unless a recovery is in progress), free its
 * rate policies/templates, unlink it and finally cancel its works
 * and timer (briefly dropping the mutex for the sync cancels).
 * NOTE(review): truncated view; comments describe only the visible
 * code.
 */
2297 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2298 struct ieee80211_vif *vif,
2299 bool reset_tx_queues)
2301 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2303 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2305 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2307 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2310 /* because of hardware recovery, we may get here twice */
2311 if (wl->state == WLCORE_STATE_OFF)
2314 wl1271_info("down");
/* if this vif owns the running scan, idle it and notify mac80211 */
2316 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2317 wl->scan_vif == vif) {
2319 * Rearm the tx watchdog just before idling scan. This
2320 * prevents just-finished scans from triggering the watchdog
2322 wl12xx_rearm_tx_watchdog_locked(wl);
2324 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2325 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2326 wl->scan_vif = NULL;
2327 wl->scan.req = NULL;
2328 ieee80211_scan_completed(wl->hw, true);
/* only talk to the FW when no recovery is in progress */
2331 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2332 /* disable active roles */
2333 ret = wl1271_ps_elp_wakeup(wl);
2337 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2338 wlvif->bss_type == BSS_TYPE_IBSS) {
2339 if (wl12xx_dev_role_started(wlvif))
2340 wl12xx_stop_dev(wl, wlvif);
2343 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2347 wl1271_ps_elp_sleep(wl);
2350 /* clear all hlids (except system_hlid) */
2351 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
/* release per-role rate policies and templates */
2353 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2354 wlvif->bss_type == BSS_TYPE_IBSS) {
2355 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2356 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2357 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2358 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2359 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2361 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2362 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2363 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2364 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2365 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2366 wl12xx_free_rate_policy(wl,
2367 &wlvif->ap.ucast_rate_idx[i]);
2368 wl1271_free_ap_keys(wl, wlvif);
2371 dev_kfree_skb(wlvif->probereq);
2372 wlvif->probereq = NULL;
2373 wl12xx_tx_reset_wlvif(wl, wlvif);
2374 if (wl->last_wlvif == wlvif)
2375 wl->last_wlvif = NULL;
2376 list_del(&wlvif->list);
2377 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2378 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2379 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2387 * Last AP, have more stations. Configure sleep auth according to STA.
2388 * Don't do this on unintended recovery.
2390 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2391 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2394 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2395 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2396 /* Configure for power according to debugfs */
2397 if (sta_auth != WL1271_PSM_ILLEGAL)
2398 wl1271_acx_sleep_auth(wl, sta_auth);
2399 /* Configure for power always on */
2400 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2401 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2402 /* Configure for ELP power saving */
2404 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex while synchronously cancelling vif works that
 * may themselves take it */
2408 mutex_unlock(&wl->mutex);
2410 del_timer_sync(&wlvif->rx_streaming_timer);
2411 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2412 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2414 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback: verify the vif is still tracked
 * by the driver, tear it down, and queue an intended-recovery fw
 * switch if the remaining vif count requires a different fw.
 * NOTE(review): truncated view; comments describe only the visible
 * code.
 */
2417 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2418 struct ieee80211_vif *vif)
2420 struct wl1271 *wl = hw->priv;
2421 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2422 struct wl12xx_vif *iter;
2423 struct vif_counter_data vif_count;
2425 wl12xx_get_vif_count(hw, vif, &vif_count);
2426 mutex_lock(&wl->mutex);
2428 if (wl->state == WLCORE_STATE_OFF ||
2429 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2433 * wl->vif can be null here if someone shuts down the interface
2434 * just when hardware recovery has been started.
2436 wl12xx_for_each_wlvif(wl, iter) {
2440 __wl1271_op_remove_interface(wl, vif, true);
2443 WARN_ON(iter != wlvif);
/* removing a vif may allow switching back to the single-role fw */
2444 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2445 wl12xx_force_active_psm(wl);
2446 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2447 wl12xx_queue_recovery_work(wl);
2450 mutex_unlock(&wl->mutex);
2453 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2454 struct ieee80211_vif *vif,
2455 enum nl80211_iftype new_type, bool p2p)
2457 struct wl1271 *wl = hw->priv;
2460 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2461 wl1271_op_remove_interface(hw, vif);
2463 vif->type = new_type;
2465 ret = wl1271_op_add_interface(hw, vif);
2467 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Start the STA or IBSS role in the FW ("join"). NOTE(review):
 * truncated view; comments describe only the visible code.
 */
2471 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2474 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2477 * One of the side effects of the JOIN command is that it clears
2478 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2479 * to a WPA/WPA2 access point will therefore kill the data-path.
2480 * Currently the only valid scenario for JOIN during association
2481 * is on roaming, in which case we will also be given new keys.
2482 * Keep the below message for now, unless it starts bothering
2483 * users who really like to roam a lot :)
2485 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2486 wl1271_info("JOIN while associated.");
2488 /* clear encryption type */
2489 wlvif->encryption_type = KEY_NONE;
2492 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2494 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2496 * TODO: this is an ugly workaround for wl12xx fw
2497 * bug - we are not able to tx/rx after the first
2498 * start_sta, so make dummy start+stop calls,
2499 * and then call start_sta again.
2500 * this should be fixed in the fw.
2502 wl12xx_cmd_role_start_sta(wl, wlvif);
2503 wl12xx_cmd_role_stop_sta(wl, wlvif);
2506 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2512 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2516 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2520 wl1271_error("No SSID in IEs!");
2525 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2526 wl1271_error("SSID is too long!");
2530 wlvif->ssid_len = ssid_len;
2531 memcpy(wlvif->ssid, ptr+2, ssid_len);
2535 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2537 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2538 struct sk_buff *skb;
2541 /* we currently only support setting the ssid from the ap probe req */
2542 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2545 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2549 ieoffset = offsetof(struct ieee80211_mgmt,
2550 u.probe_req.variable);
2551 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Configure the FW for a newly established STA association: record
 * AID/beacon parameters, build the maintenance templates (ps-poll,
 * probe request, keep-alive null data) and enable connection
 * monitoring. NOTE(review): truncated view; comments describe only
 * the visible code.
 */
2557 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2558 struct ieee80211_bss_conf *bss_conf)
2563 wlvif->aid = bss_conf->aid;
2564 wlvif->channel_type = bss_conf->channel_type;
2565 wlvif->beacon_int = bss_conf->beacon_int;
2567 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2570 * with wl1271, we don't need to update the
2571 * beacon_int and dtim_period, because the firmware
2572 * updates it by itself when the first beacon is
2573 * received after a join.
2575 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2580 * Get a template for hardware connection maintenance
2582 dev_kfree_skb(wlvif->probereq);
2583 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
/* cache the SSID out of the freshly built probe request */
2586 ieoffset = offsetof(struct ieee80211_mgmt,
2587 u.probe_req.variable);
2588 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2590 /* enable the connection monitoring feature */
2591 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2596 * The join command disable the keep-alive mode, shut down its process,
2597 * and also clear the template config, so we need to reset it all after
2598 * the join. The acx_aid starts the keep-alive process, and the order
2599 * of the commands below is relevant.
2601 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2605 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2609 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2613 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2614 wlvif->sta.klv_template_id,
2615 ACX_KEEP_ALIVE_TPL_VALID);
/*
 * Undo association state on disconnect: drop the cached probe
 * request, disable connection monitoring and keep-alive, abort any
 * channel switch in progress and reset TX security counters.
 * NOTE(review): truncated view; comments describe only the visible
 * code.
 */
2622 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2625 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2627 /* make sure we are connected (sta) joined */
2629 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2632 /* make sure we are joined (ibss) */
2634 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2638 /* use defaults when not associated */
2641 /* free probe-request template */
2642 dev_kfree_skb(wlvif->probereq);
2643 wlvif->probereq = NULL;
2645 /* disable connection monitor features */
2646 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2650 /* Disable the keep-alive feature */
2651 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* a disconnect aborts any pending channel switch */
2656 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2657 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2659 wl12xx_cmd_stop_channel_switch(wl);
2660 ieee80211_chswitch_done(vif, false);
2663 /* invalidate keep-alive template */
2664 wl1271_acx_keep_alive_config(wl, wlvif,
2665 wlvif->sta.klv_template_id,
2666 ACX_KEEP_ALIVE_TPL_INVALID);
2668 /* reset TX security counters on a clean disconnect */
2669 wlvif->tx_security_last_seq_lsb = 0;
2670 wlvif->tx_security_seq = 0;
2675 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2677 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2678 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Apply a mac80211 configuration change to one vif: channel/band
 * moves (with rate-policy refresh), PS mode transitions for STA, and
 * TX power changes. NOTE(review): truncated view; comments describe
 * only the visible code.
 */
2681 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2682 struct ieee80211_conf *conf, u32 changed)
2684 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2687 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2689 /* if the channel changes while joined, join again */
2690 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2691 ((wlvif->band != conf->channel->band) ||
2692 (wlvif->channel != channel) ||
2693 (wlvif->channel_type != conf->channel_type))) {
2694 /* send all pending packets */
2695 ret = wlcore_tx_work_locked(wl);
2699 wlvif->band = conf->channel->band;
2700 wlvif->channel = channel;
2701 wlvif->channel_type = conf->channel_type;
/* AP: rebuild the rate policies for the new band */
2704 wl1271_set_band_rate(wl, wlvif);
2705 ret = wl1271_init_ap_rates(wl, wlvif);
2707 wl1271_error("AP rate policy change failed %d",
2711 * FIXME: the mac80211 should really provide a fixed
2712 * rate to use here. for now, just use the smallest
2713 * possible rate for the band as a fixed rate for
2714 * association frames and other control messages.
2716 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2717 wl1271_set_band_rate(wl, wlvif);
2720 wl1271_tx_min_rate_get(wl,
2721 wlvif->basic_rate_set);
2722 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2724 wl1271_warning("rate policy for channel "
/* PS changes only apply to non-AP (STA) vifs */
2729 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2731 if ((conf->flags & IEEE80211_CONF_PS) &&
2732 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2733 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* forced_ps config overrides the FW's automatic PS */
2738 if (wl->conf.conn.forced_ps) {
2739 ps_mode = STATION_POWER_SAVE_MODE;
2740 ps_mode_str = "forced";
2742 ps_mode = STATION_AUTO_PS_MODE;
2743 ps_mode_str = "auto";
2746 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2748 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2751 wl1271_warning("enter %s ps failed %d",
2754 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2755 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2757 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2759 ret = wl1271_ps_set_mode(wl, wlvif,
2760 STATION_ACTIVE_MODE);
2762 wl1271_warning("exit auto ps failed %d", ret);
2766 if (conf->power_level != wlvif->power_level) {
2767 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2771 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback. Flushes TX before a channel change or before
 * going idle (so in-flight frames such as deauth reach the air), caches
 * the new band/channel/power under wl->mutex, wakes the chip from ELP,
 * and then applies the change to every active vif via wl12xx_config_vif().
 * NOTE(review): lines elided in this excerpt (non-contiguous numbering) —
 * error/out labels are not visible here.
 */
2777 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2779 struct wl1271 *wl = hw->priv;
2780 struct wl12xx_vif *wlvif;
2781 struct ieee80211_conf *conf = &hw->conf;
2782 int channel, ret = 0;
2784 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2786 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2789 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2791 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2795 * mac80211 will go to idle nearly immediately after transmitting some
2796 * frames, such as the deauth. To make sure those frames reach the air,
2797 * wait here until the TX queue is fully flushed.
2799 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2800 ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2801 (conf->flags & IEEE80211_CONF_IDLE)))
2802 wl1271_tx_flush(wl);
2804 mutex_lock(&wl->mutex);
2806 /* we support configuring the channel and band even while off */
2807 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2808 wl->band = conf->channel->band;
2809 wl->channel = channel;
2810 wl->channel_type = conf->channel_type;
2813 if (changed & IEEE80211_CONF_CHANGE_POWER)
2814 wl->power_level = conf->power_level;
2816 if (unlikely(wl->state != WLCORE_STATE_ON))
2819 ret = wl1271_ps_elp_wakeup(wl);
2823 /* configure each interface */
2824 wl12xx_for_each_wlvif(wl, wlvif) {
2825 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2831 wl1271_ps_elp_sleep(wl);
2834 mutex_unlock(&wl->mutex);
/*
 * Multicast filter parameters passed from prepare_multicast() to
 * configure_filter() via the opaque u64 "multicast" cookie.
 * NOTE(review): fields (enabled, mc_list_length) and the closing brace
 * are elided in this excerpt; verify against full source.
 */
2839 struct wl1271_filter_params {
2842 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback (atomic context, hence GFP_ATOMIC).
 * Snapshots the multicast address list into a freshly allocated
 * wl1271_filter_params and returns it cast to u64; configure_filter()
 * later consumes (and presumably frees) it — TODO confirm in full source.
 * If the list exceeds ACX_MC_ADDRESS_GROUP_MAX, filtering is disabled
 * rather than truncated.
 */
2845 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2846 struct netdev_hw_addr_list *mc_list)
2848 struct wl1271_filter_params *fp;
2849 struct netdev_hw_addr *ha;
2851 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2853 wl1271_error("Out of memory setting filters.");
2857 /* update multicast filtering parameters */
2858 fp->mc_list_length = 0;
2859 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2860 fp->enabled = false;
2863 netdev_hw_addr_list_for_each(ha, mc_list) {
2864 memcpy(fp->mc_list[fp->mc_list_length],
2865 ha->addr, ETH_ALEN);
2866 fp->mc_list_length++;
2870 return (u64)(unsigned long)fp;
/* RX filter flags this driver supports; others are masked off in
 * configure_filter(). NOTE(review): some flag lines elided in excerpt. */
2873 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2876 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback. Masks requested flags down to
 * WL1271_SUPPORTED_FILTERS, then for each non-AP vif programs the FW
 * group-address table: an empty/disabled table when FIF_ALLMULTI is set,
 * otherwise the list captured by prepare_multicast(). The FW has no
 * direct filter API; filtering follows active roles/ROC (see comment
 * at original 2922). NOTE(review): out labels and the kfree of "fp"
 * are elided in this excerpt.
 */
2880 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2881 unsigned int changed,
2882 unsigned int *total, u64 multicast)
2884 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2885 struct wl1271 *wl = hw->priv;
2886 struct wl12xx_vif *wlvif;
2890 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2891 " total %x", changed, *total);
2893 mutex_lock(&wl->mutex);
2895 *total &= WL1271_SUPPORTED_FILTERS;
2896 changed &= WL1271_SUPPORTED_FILTERS;
2898 if (unlikely(wl->state != WLCORE_STATE_ON))
2901 ret = wl1271_ps_elp_wakeup(wl);
2905 wl12xx_for_each_wlvif(wl, wlvif) {
2906 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2907 if (*total & FIF_ALLMULTI)
2908 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2912 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2915 fp->mc_list_length);
2922 * the fw doesn't provide an api to configure the filters. instead,
2923 * the filters configuration is based on the active roles / ROC
2928 wl1271_ps_elp_sleep(wl);
2931 mutex_unlock(&wl->mutex);
/*
 * Record an AP key for deferred programming: keys set before the AP role
 * is started are stored in wlvif->ap.recorded_keys and pushed to the FW
 * later by wl1271_ap_init_hwenc(). Rejects oversized keys, duplicate key
 * ids, and a full table (MAX_NUM_KEYS).
 * NOTE(review): return statements/error codes are elided in this excerpt.
 */
2935 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2936 u8 id, u8 key_type, u8 key_size,
2937 const u8 *key, u8 hlid, u32 tx_seq_32,
2940 struct wl1271_ap_key *ap_key;
2943 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2945 if (key_size > MAX_KEY_SIZE)
2949 * Find next free entry in ap_keys. Also check we are not replacing
2952 for (i = 0; i < MAX_NUM_KEYS; i++) {
2953 if (wlvif->ap.recorded_keys[i] == NULL)
2956 if (wlvif->ap.recorded_keys[i]->id == id) {
2957 wl1271_warning("trying to record key replacement");
2962 if (i == MAX_NUM_KEYS)
2965 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2970 ap_key->key_type = key_type;
2971 ap_key->key_size = key_size;
2972 memcpy(ap_key->key, key, key_size);
2973 ap_key->hlid = hlid;
2974 ap_key->tx_seq_32 = tx_seq_32;
2975 ap_key->tx_seq_16 = tx_seq_16;
2977 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded (deferred) AP keys and clear the slots so the table
 * can be reused; kfree(NULL) is a safe no-op for empty entries. */
2981 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2985 for (i = 0; i < MAX_NUM_KEYS; i++) {
2986 kfree(wlvif->ap.recorded_keys[i]);
2987 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Push all keys recorded before AP start into the FW. Keys with an
 * invalid hlid are installed on the broadcast link. If any WEP key was
 * added, also program the default WEP key for the broadcast link.
 * Always frees the recorded key table at the end (also on error paths,
 * presumably — elided lines; verify against full source).
 */
2991 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2994 struct wl1271_ap_key *key;
2995 bool wep_key_added = false;
2997 for (i = 0; i < MAX_NUM_KEYS; i++) {
2999 if (wlvif->ap.recorded_keys[i] == NULL)
3002 key = wlvif->ap.recorded_keys[i];
3004 if (hlid == WL12XX_INVALID_LINK_ID)
3005 hlid = wlvif->ap.bcast_hlid;
3007 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3008 key->id, key->key_type,
3009 key->key_size, key->key,
3010 hlid, key->tx_seq_32,
3015 if (key->key_type == KEY_WEP)
3016 wep_key_added = true;
3019 if (wep_key_added) {
3020 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3021 wlvif->ap.bcast_hlid);
3027 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming, split by role:
 *  - AP vif: resolve the per-station hlid (or broadcast hlid); before the
 *    AP role is started, record the key for later (wl1271_record_ap_key)
 *    and pretend removals succeed; after start, program it directly.
 *  - STA vif: ignore TX-only (zero address) keys, silently ignore unicast
 *    key removal (FW clears them on next CMD_JOIN) and removals for an
 *    already-deleted hlid; otherwise program via set_sta_key, and for WEP
 *    also set the default key once.
 * NOTE(review): several return paths are elided in this excerpt.
 */
3031 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3032 u16 action, u8 id, u8 key_type,
3033 u8 key_size, const u8 *key, u32 tx_seq_32,
3034 u16 tx_seq_16, struct ieee80211_sta *sta)
3037 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3040 struct wl1271_station *wl_sta;
3044 wl_sta = (struct wl1271_station *)sta->drv_priv;
3045 hlid = wl_sta->hlid;
3047 hlid = wlvif->ap.bcast_hlid;
3050 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3052 * We do not support removing keys after AP shutdown.
3053 * Pretend we do to make mac80211 happy.
3055 if (action != KEY_ADD_OR_REPLACE)
3058 ret = wl1271_record_ap_key(wl, wlvif, id,
3060 key, hlid, tx_seq_32,
3063 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3064 id, key_type, key_size,
3065 key, hlid, tx_seq_32,
3073 static const u8 bcast_addr[ETH_ALEN] = {
3074 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3077 addr = sta ? sta->addr : bcast_addr;
3079 if (is_zero_ether_addr(addr)) {
3080 /* We dont support TX only encryption */
3084 /* The wl1271 does not allow to remove unicast keys - they
3085 will be cleared automatically on next CMD_JOIN. Ignore the
3086 request silently, as we dont want the mac80211 to emit
3087 an error message. */
3088 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3091 /* don't remove key if hlid was already deleted */
3092 if (action == KEY_REMOVE &&
3093 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3096 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3097 id, key_type, key_size,
3098 key, addr, tx_seq_32,
3103 /* the default WEP key needs to be configured at least once */
3104 if (key_type == KEY_WEP) {
3105 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * mac80211 .set_key callback. GEM and TKIP keys may change the FW spare
 * block accounting, so for those ciphers the TX queues are stopped and
 * flushed first (outside ELP) to keep subsequent packets in sync, and
 * re-woken after the key op completes. The actual work is delegated to
 * the HW-specific wlcore_hw_set_key() under wl->mutex.
 */
3116 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3117 struct ieee80211_vif *vif,
3118 struct ieee80211_sta *sta,
3119 struct ieee80211_key_conf *key_conf)
3121 struct wl1271 *wl = hw->priv;
3123 bool might_change_spare =
3124 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3125 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3127 if (might_change_spare) {
3129 * stop the queues and flush to ensure the next packets are
3130 * in sync with FW spare block accounting
3132 mutex_lock(&wl->mutex);
3133 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3134 mutex_unlock(&wl->mutex);
3136 wl1271_tx_flush(wl);
3139 mutex_lock(&wl->mutex);
3141 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3143 goto out_wake_queues;
3146 ret = wl1271_ps_elp_wakeup(wl);
3148 goto out_wake_queues;
3150 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3152 wl1271_ps_elp_sleep(wl);
3155 if (might_change_spare)
3156 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3158 mutex_unlock(&wl->mutex);
/*
 * Shared set-key implementation (exported for the HW-specific modules).
 * Maps the mac80211 cipher to a FW key type (WEP/TKIP/AES/GEM), captures
 * the current TX security sequence counters for ciphers that need them,
 * and dispatches SET_KEY/DISABLE_KEY to wl1271_set_key(). For a STA vif,
 * a change of the unicast (or WEP) encryption type also rebuilds the ARP
 * response template so its encryption matches.
 * NOTE(review): switch/case and return lines are elided in this excerpt;
 * the visible fragment is not complete control flow.
 */
3163 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3164 struct ieee80211_vif *vif,
3165 struct ieee80211_sta *sta,
3166 struct ieee80211_key_conf *key_conf)
3168 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3174 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3176 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3177 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3178 key_conf->cipher, key_conf->keyidx,
3179 key_conf->keylen, key_conf->flags);
3180 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3182 switch (key_conf->cipher) {
3183 case WLAN_CIPHER_SUITE_WEP40:
3184 case WLAN_CIPHER_SUITE_WEP104:
3187 key_conf->hw_key_idx = key_conf->keyidx;
3189 case WLAN_CIPHER_SUITE_TKIP:
3190 key_type = KEY_TKIP;
3192 key_conf->hw_key_idx = key_conf->keyidx;
3193 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3194 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3196 case WLAN_CIPHER_SUITE_CCMP:
3199 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3200 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3201 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3203 case WL1271_CIPHER_SUITE_GEM:
3205 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3206 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3209 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3216 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3217 key_conf->keyidx, key_type,
3218 key_conf->keylen, key_conf->key,
3219 tx_seq_32, tx_seq_16, sta);
3221 wl1271_error("Could not add or replace key");
3226 * reconfiguring arp response if the unicast (or common)
3227 * encryption key type was changed
3229 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3230 (sta || key_type == KEY_WEP) &&
3231 wlvif->encryption_type != key_type) {
3232 wlvif->encryption_type = key_type;
3233 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3235 wl1271_warning("build arp rsp failed: %d", ret);
3242 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3243 key_conf->keyidx, key_type,
3244 key_conf->keylen, key_conf->key,
3247 wl1271_error("Could not remove key");
3253 wl1271_error("Unsupported key cmd 0x%x", cmd);
3259 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .hw_scan callback. Refuses to scan while any role holds a
 * remain-on-channel (roc_map non-empty). If the device is off, it cannot
 * return -EBUSY because cfg80211 would then wait forever for
 * ieee80211_scan_completed() (see comment at original 3281) — the elided
 * code presumably returns a different error; verify against full source.
 */
3261 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3262 struct ieee80211_vif *vif,
3263 struct cfg80211_scan_request *req)
3265 struct wl1271 *wl = hw->priv;
3270 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3273 ssid = req->ssids[0].ssid;
3274 len = req->ssids[0].ssid_len;
3277 mutex_lock(&wl->mutex);
3279 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3281 * We cannot return -EBUSY here because cfg80211 will expect
3282 * a call to ieee80211_scan_completed if we do - in this case
3283 * there won't be any call.
3289 ret = wl1271_ps_elp_wakeup(wl);
3293 /* fail if there is any role in ROC */
3294 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3295 /* don't allow scanning right now */
3300 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3302 wl1271_ps_elp_sleep(wl);
3304 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback. Stops an in-progress FW scan,
 * rearms the TX watchdog (so a just-finished scan does not trigger it),
 * resets scan state to IDLE, and reports an aborted scan to mac80211
 * (ieee80211_scan_completed with aborted=true). The delayed
 * scan_complete_work is cancelled outside wl->mutex to avoid deadlock
 * with the work item — presumably; verify lock ordering in full source.
 */
3309 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3310 struct ieee80211_vif *vif)
3312 struct wl1271 *wl = hw->priv;
3315 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3317 mutex_lock(&wl->mutex);
3319 if (unlikely(wl->state != WLCORE_STATE_ON))
3322 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3325 ret = wl1271_ps_elp_wakeup(wl);
3329 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3330 ret = wl1271_scan_stop(wl);
3336 * Rearm the tx watchdog just before idling scan. This
3337 * prevents just-finished scans from triggering the watchdog
3339 wl12xx_rearm_tx_watchdog_locked(wl);
3341 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3342 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3343 wl->scan_vif = NULL;
3344 wl->scan.req = NULL;
3345 ieee80211_scan_completed(wl->hw, true);
3348 wl1271_ps_elp_sleep(wl);
3350 mutex_unlock(&wl->mutex);
3352 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: configure the FW scheduled scan
 * from the cfg80211 request/IEs, start it, and mark wl->sched_scanning
 * so other paths (e.g. association) know to stop it.
 */
3355 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3356 struct ieee80211_vif *vif,
3357 struct cfg80211_sched_scan_request *req,
3358 struct ieee80211_sched_scan_ies *ies)
3360 struct wl1271 *wl = hw->priv;
3361 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3364 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3366 mutex_lock(&wl->mutex);
3368 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3373 ret = wl1271_ps_elp_wakeup(wl);
3377 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3381 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3385 wl->sched_scanning = true;
3388 wl1271_ps_elp_sleep(wl);
3390 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: stop the FW scheduled scan for
 * this vif under wl->mutex with the chip awake.
 */
3394 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3395 struct ieee80211_vif *vif)
3397 struct wl1271 *wl = hw->priv;
3398 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3401 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3403 mutex_lock(&wl->mutex);
3405 if (unlikely(wl->state != WLCORE_STATE_ON))
3408 ret = wl1271_ps_elp_wakeup(wl);
3412 wl1271_scan_sched_scan_stop(wl, wlvif);
3414 wl1271_ps_elp_sleep(wl);
3416 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: push the new fragmentation
 * threshold to the FW via ACX (device-global, not per-vif).
 */
3419 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3421 struct wl1271 *wl = hw->priv;
3424 mutex_lock(&wl->mutex);
3426 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3431 ret = wl1271_ps_elp_wakeup(wl);
3435 ret = wl1271_acx_frag_threshold(wl, value);
3437 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3439 wl1271_ps_elp_sleep(wl);
3442 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: unlike the frag threshold, RTS
 * is configured per vif, so iterate all active vifs and apply the value
 * to each.
 */
3447 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3449 struct wl1271 *wl = hw->priv;
3450 struct wl12xx_vif *wlvif;
3453 mutex_lock(&wl->mutex);
3455 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3460 ret = wl1271_ps_elp_wakeup(wl);
3464 wl12xx_for_each_wlvif(wl, wlvif) {
3465 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3467 wl1271_warning("set rts threshold failed: %d", ret);
3469 wl1271_ps_elp_sleep(wl);
3472 mutex_unlock(&wl->mutex);
/*
 * Remove the first information element with the given EID from an skb,
 * searching from ieoffset. The bytes after the IE are shifted down with
 * memmove and the skb is trimmed by the IE length.
 * NOTE(review): the computation of "next"/"len" and the not-found early
 * return are elided in this excerpt.
 */
3477 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3480 const u8 *next, *end = skb->data + skb->len;
3481 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3482 skb->len - ieoffset);
3487 memmove(ie, next, end - next);
3488 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but for a vendor-specific IE identified by
 * OUI + OUI type (used below to strip the P2P IE from probe responses).
 * NOTE(review): "next"/"len" setup and the not-found return are elided.
 */
3491 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3492 unsigned int oui, u8 oui_type,
3496 const u8 *next, *end = skb->data + skb->len;
3497 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3498 skb->data + ieoffset,
3499 skb->len - ieoffset);
3504 memmove(ie, next, end - next);
3505 skb_trim(skb, skb->len - len);
/*
 * Install the AP probe-response template supplied by mac80211
 * (ieee80211_proberesp_get) into the FW, then mark the vif so the
 * beacon-derived probe response is no longer used
 * (WLVIF_FLAG_AP_PROBE_RESP_SET).
 */
3508 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3509 struct ieee80211_vif *vif)
3511 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3512 struct sk_buff *skb;
3515 skb = ieee80211_proberesp_get(wl->hw, vif);
3519 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3520 CMD_TEMPL_AP_PROBE_RESPONSE,
3529 wl1271_debug(DEBUG_AP, "probe response updated");
3530 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template setup for hidden-SSID APs: if the vif
 * already has a correct SSID, install the given template as-is;
 * otherwise rebuild it on the stack, splicing the real SSID from
 * bss_conf in place of the (empty) SSID IE found in the beacon-derived
 * data, and install the rebuilt template. Fails if the combined length
 * would exceed WL1271_CMD_TEMPL_MAX_SIZE or if no SSID IE is present.
 */
3536 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3537 struct ieee80211_vif *vif,
3539 size_t probe_rsp_len,
3542 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3543 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3544 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3545 int ssid_ie_offset, ie_offset, templ_len;
3548 /* no need to change probe response if the SSID is set correctly */
3549 if (wlvif->ssid_len > 0)
3550 return wl1271_cmd_template_set(wl, wlvif->role_id,
3551 CMD_TEMPL_AP_PROBE_RESPONSE,
3556 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3557 wl1271_error("probe_rsp template too big");
3561 /* start searching from IE offset */
3562 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3564 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3565 probe_rsp_len - ie_offset);
3567 wl1271_error("No SSID in beacon!");
3571 ssid_ie_offset = ptr - probe_rsp_data;
3572 ptr += (ptr[1] + 2);
3574 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3576 /* insert SSID from bss_conf */
3577 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3578 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3579 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3580 bss_conf->ssid, bss_conf->ssid_len);
3581 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3583 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3584 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3585 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3587 return wl1271_cmd_template_set(wl, wlvif->role_id,
3588 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes: slot time (short/long), preamble
 * (short/long — note the preamble return value is not checked here),
 * and CTS protection enable/disable.
 */
3594 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3595 struct ieee80211_vif *vif,
3596 struct ieee80211_bss_conf *bss_conf,
3599 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3602 if (changed & BSS_CHANGED_ERP_SLOT) {
3603 if (bss_conf->use_short_slot)
3604 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3606 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3608 wl1271_warning("Set slot time failed %d", ret);
3613 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3614 if (bss_conf->use_short_preamble)
3615 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3617 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3620 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3621 if (bss_conf->use_cts_prot)
3622 ret = wl1271_acx_cts_protect(wl, wlvif,
3625 ret = wl1271_acx_cts_protect(wl, wlvif,
3626 CTSPROTECT_DISABLE);
3628 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211 and install it as the FW beacon
 * template (AP or IBSS variant). Unless userspace already supplied an
 * explicit probe response (WLVIF_FLAG_AP_PROBE_RESP_SET), also derive a
 * probe-response template from the beacon: strip the TIM IE and the P2P
 * vendor IE (the FW must not answer probe requests carrying the P2P IE —
 * the supplicant handles those), rewrite the frame control to
 * PROBE_RESP, and install it (legacy hidden-SSID path for AP vifs).
 * The beacon skb is freed on every visible exit path.
 * NOTE(review): lines elided in this excerpt (non-contiguous numbering);
 * several conditionals/labels are not visible.
 */
3637 static int wlcore_set_beacon_template(struct wl1271 *wl,
3638 struct ieee80211_vif *vif,
3641 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3642 struct ieee80211_hdr *hdr;
3645 int ieoffset = offsetof(struct ieee80211_mgmt,
3647 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3655 wl1271_debug(DEBUG_MASTER, "beacon updated");
3657 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3659 dev_kfree_skb(beacon);
3662 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3663 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3665 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3670 dev_kfree_skb(beacon);
3675 * In case we already have a probe-resp beacon set explicitly
3676 * by usermode, don't use the beacon data.
3678 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3681 /* remove TIM ie from probe response */
3682 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3685 * remove p2p ie from probe response.
3686 * the fw reponds to probe requests that don't include
3687 * the p2p ie. probe requests with p2p ie will be passed,
3688 * and will be responded by the supplicant (the spec
3689 * forbids including the p2p ie when responding to probe
3690 * requests that didn't include it).
3692 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3693 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3695 hdr = (struct ieee80211_hdr *) beacon->data;
3696 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3697 IEEE80211_STYPE_PROBE_RESP);
3699 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3704 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3705 CMD_TEMPL_PROBE_RESPONSE,
3710 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * install an explicit AP probe-response template (AP only), and rebuild
 * the beacon template when the beacon itself changed. On failure logs
 * "beacon info change failed" — recovery path elided in this excerpt.
 */
3718 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3719 struct ieee80211_vif *vif,
3720 struct ieee80211_bss_conf *bss_conf,
3723 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3724 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3727 if ((changed & BSS_CHANGED_BEACON_INT)) {
3728 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3729 bss_conf->beacon_int);
3731 wlvif->beacon_int = bss_conf->beacon_int;
3734 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3735 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3737 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3740 if ((changed & BSS_CHANGED_BEACON)) {
3741 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3748 wl1271_error("beacon info change failed: %d", ret);
3752 /* AP mode changes */
3753 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3754 struct ieee80211_vif *vif,
3755 struct ieee80211_bss_conf *bss_conf,
3758 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3761 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3762 u32 rates = bss_conf->basic_rates;
3764 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3766 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3767 wlvif->basic_rate_set);
3769 ret = wl1271_init_ap_rates(wl, wlvif);
3771 wl1271_error("AP rate policy change failed %d", ret);
3775 ret = wl1271_ap_init_templates(wl, vif);
3779 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3783 ret = wlcore_set_beacon_template(wl, vif, true);
3788 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3792 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3793 if (bss_conf->enable_beacon) {
3794 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3795 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3799 ret = wl1271_ap_init_hwenc(wl, wlvif);
3803 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3804 wl1271_debug(DEBUG_AP, "started AP");
3807 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3808 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3812 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3813 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3815 wl1271_debug(DEBUG_AP, "stopped AP");
3820 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3824 /* Handle HT information change */
3825 if ((changed & BSS_CHANGED_HT) &&
3826 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3827 ret = wl1271_acx_set_ht_information(wl, wlvif,
3828 bss_conf->ht_operation_mode);
3830 wl1271_warning("Set ht information failed %d", ret);
/*
 * STA path: adopt a new BSSID. Caches the beacon interval and basic
 * rates from bss_conf, derives the basic/min rate set, stops any active
 * scheduled scan (not supported while connected), pushes the rate
 * policies, rebuilds the (QoS) null-data templates, sets the SSID, and
 * marks the vif IN_USE.
 * NOTE(review): assignments around original 3858/3863 are truncated in
 * this excerpt — verify against full source.
 */
3839 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3840 struct ieee80211_bss_conf *bss_conf,
3846 wl1271_debug(DEBUG_MAC80211,
3847 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3848 bss_conf->bssid, bss_conf->aid,
3849 bss_conf->beacon_int,
3850 bss_conf->basic_rates, sta_rate_set);
3852 wlvif->beacon_int = bss_conf->beacon_int;
3853 rates = bss_conf->basic_rates;
3854 wlvif->basic_rate_set =
3855 wl1271_tx_enabled_rates_get(wl, rates,
3858 wl1271_tx_min_rate_get(wl,
3859 wlvif->basic_rate_set);
3863 wl1271_tx_enabled_rates_get(wl,
3867 /* we only support sched_scan while not connected */
3868 if (wl->sched_scanning) {
3869 wl1271_scan_sched_scan_stop(wl, wlvif);
3870 ieee80211_sched_scan_stopped(wl->hw);
3873 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3877 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3881 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
3885 wlcore_set_ssid(wl, wlvif);
3887 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * STA path: BSSID cleared. Revert to the band's minimum rates, push the
 * rate policies, stop the STA role if it was in use, and clear IN_USE.
 */
3892 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3896 /* revert back to minimum rates for the current band */
3897 wl1271_set_band_rate(wl, wlvif);
3898 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3900 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3904 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3905 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
3906 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
3911 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/* STA/IBSS mode changes */
/*
 * STA/IBSS-side bss_info_changed handling. In rough order:
 *  - beacon info changes (IBSS beaconing);
 *  - IBSS join/leave flag maintenance;
 *  - CQM RSSI trigger configuration;
 *  - on BSSID/HT change, look up the AP station under RCU (presumably —
 *    rcu lines elided) to capture its supported/HT rates;
 *  - set or clear the BSSID (wlcore_set_bssid/clear_bssid), IBSS rate
 *    setup, ERP changes, then join + ROC until authorized;
 *  - association/disassociation (wlcore_set_assoc/unset_assoc), HT
 *    capabilities and HT information updates;
 *  - ARP filtering/offload: rebuild the ARP response template on every
 *    IP change (FW workaround, see original 4112-4115) and
 *    enable/disable the ARP IP filter.
 * NOTE(review): this excerpt elides many lines (error paths, rcu
 * lock/unlock, some conditionals); the visible flow is incomplete.
 */
3915 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3916 struct ieee80211_vif *vif,
3917 struct ieee80211_bss_conf *bss_conf,
3920 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3921 bool do_join = false;
3922 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3923 bool ibss_joined = false;
3924 u32 sta_rate_set = 0;
3926 struct ieee80211_sta *sta;
3927 bool sta_exists = false;
3928 struct ieee80211_sta_ht_cap sta_ht_cap;
3931 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3937 if (changed & BSS_CHANGED_IBSS) {
3938 if (bss_conf->ibss_joined) {
3939 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3942 wlcore_unset_assoc(wl, wlvif);
3943 wl12xx_cmd_role_stop_sta(wl, wlvif);
3947 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3950 /* Need to update the SSID (for filtering etc) */
3951 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3954 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3955 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3956 bss_conf->enable_beacon ? "enabled" : "disabled");
3961 if ((changed & BSS_CHANGED_CQM)) {
3962 bool enable = false;
3963 if (bss_conf->cqm_rssi_thold)
3965 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3966 bss_conf->cqm_rssi_thold,
3967 bss_conf->cqm_rssi_hyst);
3970 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3973 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT)) {
3975 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3979 /* save the supp_rates of the ap */
3980 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3981 if (sta->ht_cap.ht_supported)
3983 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3984 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3985 sta_ht_cap = sta->ht_cap;
3992 if (changed & BSS_CHANGED_BSSID) {
3993 if (!is_zero_ether_addr(bss_conf->bssid)) {
3994 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
3999 /* Need to update the BSSID (for filtering etc) */
4002 ret = wlcore_clear_bssid(wl, wlvif);
4008 if (changed & BSS_CHANGED_IBSS) {
4009 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4010 bss_conf->ibss_joined);
4012 if (bss_conf->ibss_joined) {
4013 u32 rates = bss_conf->basic_rates;
4014 wlvif->basic_rate_set =
4015 wl1271_tx_enabled_rates_get(wl, rates,
4018 wl1271_tx_min_rate_get(wl,
4019 wlvif->basic_rate_set);
4021 /* by default, use 11b + OFDM rates */
4022 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4023 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4029 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4034 ret = wlcore_join(wl, wlvif);
4036 wl1271_warning("cmd join failed %d", ret);
4040 /* ROC until connected (after EAPOL exchange) */
4042 ret = wl12xx_roc(wl, wlvif, wlvif->role_id,
4043 wlvif->band, wlvif->channel);
4049 if (changed & BSS_CHANGED_ASSOC) {
4050 if (bss_conf->assoc) {
4051 ret = wlcore_set_assoc(wl, wlvif, bss_conf);
4055 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4056 wl12xx_set_authorized(wl, wlvif);
4058 wlcore_unset_assoc(wl, wlvif);
4062 /* Handle new association with HT. Do this after join. */
4064 if ((changed & BSS_CHANGED_HT) &&
4065 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4066 ret = wl1271_acx_set_ht_capabilities(wl,
4071 wl1271_warning("Set ht cap true failed %d",
4076 /* handle new association without HT and disassociation */
4077 else if (changed & BSS_CHANGED_ASSOC) {
4078 ret = wl1271_acx_set_ht_capabilities(wl,
4083 wl1271_warning("Set ht cap false failed %d",
4090 /* Handle HT information change. Done after join. */
4091 if ((changed & BSS_CHANGED_HT) &&
4092 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4093 ret = wl1271_acx_set_ht_information(wl, wlvif,
4094 bss_conf->ht_operation_mode);
4096 wl1271_warning("Set ht information failed %d", ret);
4101 /* Handle arp filtering. Done after join. */
4102 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4103 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4104 __be32 addr = bss_conf->arp_addr_list[0];
4105 wlvif->sta.qos = bss_conf->qos;
4106 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4108 if (bss_conf->arp_addr_cnt == 1 &&
4109 bss_conf->arp_filter_enabled) {
4110 wlvif->ip_addr = addr;
4112 * The template should have been configured only upon
4113 * association. however, it seems that the correct ip
4114 * isn't being set (when sending), so we have to
4115 * reconfigure the template upon every ip change.
4117 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4119 wl1271_warning("build arp rsp failed: %d", ret);
4123 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4124 (ACX_ARP_FILTER_ARP_FILTERING |
4125 ACX_ARP_FILTER_AUTO_ARP),
4129 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback: cancel a pending connection-loss
 * work on STA association changes, flush TX before disabling AP
 * beaconing, then (device on, vif initialized, chip awake) dispatch to
 * the AP- or STA-specific handler.
 */
4140 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4141 struct ieee80211_vif *vif,
4142 struct ieee80211_bss_conf *bss_conf,
4145 struct wl1271 *wl = hw->priv;
4146 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4147 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4150 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
4154 * make sure to cancel pending disconnections if our association
4157 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4158 cancel_delayed_work_sync(&wl->connection_loss_work);
4160 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4161 !bss_conf->enable_beacon)
4162 wl1271_tx_flush(wl);
4164 mutex_lock(&wl->mutex);
4166 if (unlikely(wl->state != WLCORE_STATE_ON))
4169 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4172 ret = wl1271_ps_elp_wakeup(wl);
4177 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4179 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4181 wl1271_ps_elp_sleep(wl);
4184 mutex_unlock(&wl->mutex);
/*
 * mac80211 .conf_tx callback: program AC parameters (cw_min/cw_max,
 * AIFS, TXOP) and the TID configuration for the given queue. mac80211
 * supplies TXOP in 32us units, hence the "<< 5" conversion to usecs.
 * PS scheme is UPSD-trigger when UAPSD is requested (condition elided
 * in this excerpt), legacy otherwise.
 */
4187 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4188 struct ieee80211_vif *vif, u16 queue,
4189 const struct ieee80211_tx_queue_params *params)
4191 struct wl1271 *wl = hw->priv;
4192 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4196 mutex_lock(&wl->mutex);
4198 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4201 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4203 ps_scheme = CONF_PS_SCHEME_LEGACY;
4205 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4208 ret = wl1271_ps_elp_wakeup(wl);
4213 * the txop is confed in units of 32us by the mac80211,
4216 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4217 params->cw_min, params->cw_max,
4218 params->aifs, params->txop << 5);
4222 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4223 CONF_CHANNEL_TYPE_EDCF,
4224 wl1271_tx_get_queue(queue),
4225 ps_scheme, CONF_ACK_POLICY_LEGACY,
4229 wl1271_ps_elp_sleep(wl);
4232 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: read the FW TSF via ACX. Returns
 * ULLONG_MAX if the device is off or the query fails (mactime is only
 * overwritten on success).
 */
4237 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4238 struct ieee80211_vif *vif)
4241 struct wl1271 *wl = hw->priv;
4242 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4243 u64 mactime = ULLONG_MAX;
4246 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4248 mutex_lock(&wl->mutex);
4250 if (unlikely(wl->state != WLCORE_STATE_ON))
4253 ret = wl1271_ps_elp_wakeup(wl);
4257 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4262 wl1271_ps_elp_sleep(wl);
4265 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback: report only the current channel
 * (idx bounds check and filled/noise fields elided in this excerpt).
 */
4269 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4270 struct survey_info *survey)
4272 struct ieee80211_conf *conf = &hw->conf;
4277 survey->channel = conf->channel;
/*
 * Allocate an HLID (FW link id) for a new AP-mode station: enforce the
 * AP_MAX_STATIONS cap, allocate a link, mark it in the vif's
 * sta_hlid_map, record the station MAC in wl->links, and bump
 * active_sta_count.
 */
4282 static int wl1271_allocate_sta(struct wl1271 *wl,
4283 struct wl12xx_vif *wlvif,
4284 struct ieee80211_sta *sta)
4286 struct wl1271_station *wl_sta;
4290 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4291 wl1271_warning("could not allocate HLID - too much stations");
4295 wl_sta = (struct wl1271_station *)sta->drv_priv;
4296 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4298 wl1271_warning("could not allocate HLID - too many links");
4302 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4303 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4304 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear its hlid_map bit, link
 * address, BA bitmap and PS-map bits, free the FW link, and decrement
 * active_sta_count. When the last STA goes away, rearm the TX watchdog
 * so the FW can still return STA-buffered packets without tripping it.
 */
4308 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4310 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4313 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4314 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4315 wl->links[hlid].ba_bitmap = 0;
4316 __clear_bit(hlid, &wl->ap_ps_map);
4317 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4318 wl12xx_free_link(wl, wlvif, &hlid);
4319 wl->active_sta_count--;
4322 * rearm the tx watchdog when the last STA is freed - give the FW a
4323 * chance to return STA-buffered packets before complaining.
4325 if (wl->active_sta_count == 0)
4326 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID for the station and announce
 * the peer to the firmware.  On add_peer failure the HLID is released
 * again so the allocation does not leak.
 */
4329 static int wl12xx_sta_add(struct wl1271 *wl,
4330 struct wl12xx_vif *wlvif,
4331 struct ieee80211_sta *sta)
4333 struct wl1271_station *wl_sta;
4337 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4339 ret = wl1271_allocate_sta(wl, wlvif, sta);
4343 wl_sta = (struct wl1271_station *)sta->drv_priv;
4344 hlid = wl_sta->hlid;
4346 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation if the firmware rejected the peer */
4348 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: tell the firmware to drop the peer, then
 * free the station's HLID.
 *
 * NOTE(review): 'id' below is declared/assigned on lines dropped by
 * this extraction (upstream: id = wl_sta->hlid) -- confirm upstream.
 */
4353 static int wl12xx_sta_remove(struct wl1271 *wl,
4354 struct wl12xx_vif *wlvif,
4355 struct ieee80211_sta *sta)
4357 struct wl1271_station *wl_sta;
4360 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4362 wl_sta = (struct wl1271_station *)sta->drv_priv;
4364 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4367 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4371 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Dispatch a mac80211 station state transition to the matching driver
 * action.  AP mode: NOTEXIST->NONE adds a peer, NONE->NOTEXIST removes
 * it, ->AUTHORIZED pushes peer state + HT caps to firmware.  STA mode:
 * ->AUTHORIZED runs wl12xx_set_authorized; AUTHORIZED->ASSOC clears
 * the authorized/state-sent flags again.
 */
4375 static int wl12xx_update_sta_state(struct wl1271 *wl,
4376 struct wl12xx_vif *wlvif,
4377 struct ieee80211_sta *sta,
4378 enum ieee80211_sta_state old_state,
4379 enum ieee80211_sta_state new_state)
4381 struct wl1271_station *wl_sta;
4383 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4384 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4387 wl_sta = (struct wl1271_station *)sta->drv_priv;
4388 hlid = wl_sta->hlid;
4390 /* Add station (AP mode) */
4392 old_state == IEEE80211_STA_NOTEXIST &&
4393 new_state == IEEE80211_STA_NONE)
4394 return wl12xx_sta_add(wl, wlvif, sta);
4396 /* Remove station (AP mode) */
4398 old_state == IEEE80211_STA_NONE &&
4399 new_state == IEEE80211_STA_NOTEXIST) {
4401 wl12xx_sta_remove(wl, wlvif, sta);
4405 /* Authorize station (AP mode) */
4407 new_state == IEEE80211_STA_AUTHORIZED) {
4408 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4412 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4417 /* Authorize station */
4419 new_state == IEEE80211_STA_AUTHORIZED) {
4420 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4421 return wl12xx_set_authorized(wl, wlvif);
/* de-authorize (STA mode): AUTHORIZED -> ASSOC rollback */
4425 old_state == IEEE80211_STA_AUTHORIZED &&
4426 new_state == IEEE80211_STA_ASSOC) {
4427 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4428 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
/*
 * mac80211 sta_state op: standard lock/wakeup wrapper around
 * wl12xx_update_sta_state.  For downward transitions errors are not
 * propagated (mac80211 cannot handle failure on teardown).
 */
4435 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4436 struct ieee80211_vif *vif,
4437 struct ieee80211_sta *sta,
4438 enum ieee80211_sta_state old_state,
4439 enum ieee80211_sta_state new_state)
4441 struct wl1271 *wl = hw->priv;
4442 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4445 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4446 sta->aid, old_state, new_state);
4448 mutex_lock(&wl->mutex);
4450 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4455 ret = wl1271_ps_elp_wakeup(wl);
4459 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4461 wl1271_ps_elp_sleep(wl);
4463 mutex_unlock(&wl->mutex);
/* never fail a state *decrease*; mac80211 can't recover from it */
4464 if (new_state < old_state)
/*
 * mac80211 ampdu_action op.  Only RX block-ack sessions are managed by
 * the host (started/stopped via ACX, tracked per-TID in ba_bitmap and
 * globally in ba_rx_session_count, capped at RX_BA_MAX_SESSIONS); TX
 * aggregation is handled autonomously by the firmware, so the TX cases
 * are accepted without any host-side action.
 */
4469 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4470 struct ieee80211_vif *vif,
4471 enum ieee80211_ampdu_mlme_action action,
4472 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4475 struct wl1271 *wl = hw->priv;
4476 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4478 u8 hlid, *ba_bitmap;
4480 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4483 /* sanity check - the fields in FW are only 8bits wide */
4484 if (WARN_ON(tid > 0xFF))
4487 mutex_lock(&wl->mutex);
4489 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* pick the link + BA bitmap for this role (STA vs AP peer) */
4494 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4495 hlid = wlvif->sta.hlid;
4496 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4497 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4498 struct wl1271_station *wl_sta;
4500 wl_sta = (struct wl1271_station *)sta->drv_priv;
4501 hlid = wl_sta->hlid;
4502 ba_bitmap = &wl->links[hlid].ba_bitmap;
4508 ret = wl1271_ps_elp_wakeup(wl);
4512 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4516 case IEEE80211_AMPDU_RX_START:
4517 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4522 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4524 wl1271_error("exceeded max RX BA sessions");
4528 if (*ba_bitmap & BIT(tid)) {
4530 wl1271_error("cannot enable RX BA session on active "
4535 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4538 *ba_bitmap |= BIT(tid);
4539 wl->ba_rx_session_count++;
4543 case IEEE80211_AMPDU_RX_STOP:
4544 if (!(*ba_bitmap & BIT(tid))) {
4546 * this happens on reconfig - so only output a debug
4547 * message for now, and don't fail the function.
4549 wl1271_debug(DEBUG_MAC80211,
4550 "no active RX BA session on tid: %d",
4556 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4559 *ba_bitmap &= ~BIT(tid);
4560 wl->ba_rx_session_count--;
4565 * The BA initiator session management in FW independently.
4566 * Falling break here on purpose for all TX APDU commands.
4568 case IEEE80211_AMPDU_TX_START:
4569 case IEEE80211_AMPDU_TX_STOP:
4570 case IEEE80211_AMPDU_TX_OPERATIONAL:
4575 wl1271_error("Incorrect ampdu action id=%x\n", action);
4579 wl1271_ps_elp_sleep(wl);
4582 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask op: cache the per-band enabled-rates masks
 * in the vif.  For a *non-associated* STA vif the new basic/rate
 * policy is additionally pushed to the firmware right away.
 */
4587 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4588 struct ieee80211_vif *vif,
4589 const struct cfg80211_bitrate_mask *mask)
4591 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4592 struct wl1271 *wl = hw->priv;
4595 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4596 mask->control[NL80211_BAND_2GHZ].legacy,
4597 mask->control[NL80211_BAND_5GHZ].legacy);
4599 mutex_lock(&wl->mutex);
/* remember the mask per band; applied on next band/rate update */
4601 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4602 wlvif->bitrate_masks[i] =
4603 wl1271_tx_enabled_rates_get(wl,
4604 mask->control[i].legacy,
4607 if (unlikely(wl->state != WLCORE_STATE_ON))
4610 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4611 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4613 ret = wl1271_ps_elp_wakeup(wl);
4617 wl1271_set_band_rate(wl, wlvif);
4619 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4620 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4622 wl1271_ps_elp_sleep(wl);
4625 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch op: flush TX, then issue a channel-switch
 * command for every STA vif (mac80211 does not pass the vif here).
 * If the device is already off, report the switch as failed via
 * ieee80211_chswitch_done() for each STA vif.
 */
4630 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4631 struct ieee80211_channel_switch *ch_switch)
4633 struct wl1271 *wl = hw->priv;
4634 struct wl12xx_vif *wlvif;
4637 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4639 wl1271_tx_flush(wl);
4641 mutex_lock(&wl->mutex);
4643 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4644 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4645 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4646 ieee80211_chswitch_done(vif, false);
4649 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4653 ret = wl1271_ps_elp_wakeup(wl);
4657 /* TODO: change mac80211 to pass vif as param */
4658 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4659 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4662 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4665 wl1271_ps_elp_sleep(wl);
4668 mutex_unlock(&wl->mutex);
/* mac80211 flush op: just drain the driver/firmware TX queues. */
4671 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4673 struct wl1271 *wl = hw->priv;
4675 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel op: start the device role on the
 * requested channel and schedule roc_complete_work to end the ROC
 * after 'duration' ms.  Only one ROC at a time is supported -- the
 * WARN_ON below rejects a request while roc_vif/roc_map is busy.
 */
4678 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4679 struct ieee80211_vif *vif,
4680 struct ieee80211_channel *chan,
4681 enum nl80211_channel_type channel_type,
4684 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4685 struct wl1271 *wl = hw->priv;
4686 int channel, ret = 0;
4688 channel = ieee80211_frequency_to_channel(chan->center_freq);
4690 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4691 channel, wlvif->role_id);
4693 mutex_lock(&wl->mutex);
4695 if (unlikely(wl->state != WLCORE_STATE_ON))
4698 /* return EBUSY if we can't ROC right now */
4699 if (WARN_ON(wl->roc_vif ||
4700 find_first_bit(wl->roc_map,
4701 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4706 ret = wl1271_ps_elp_wakeup(wl);
4710 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4715 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4716 msecs_to_jiffies(duration));
4718 wl1271_ps_elp_sleep(wl);
4720 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel period (caller holds wl->mutex, device
 * awake).  No-op when no ROC is pending; otherwise stop the device
 * role that was started for the ROC vif.
 */
4724 static int __wlcore_roc_completed(struct wl1271 *wl)
4726 struct wl12xx_vif *wlvif;
4729 /* already completed */
4730 if (unlikely(!wl->roc_vif))
4733 wlvif = wl12xx_vif_to_data(wl->roc_vif);
4735 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4738 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked/awake wrapper around __wlcore_roc_completed: takes the mutex,
 * wakes the chip, completes the ROC and puts the chip back to sleep.
 */
4747 static int wlcore_roc_completed(struct wl1271 *wl)
4751 wl1271_debug(DEBUG_MAC80211, "roc complete");
4753 mutex_lock(&wl->mutex);
4755 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4760 ret = wl1271_ps_elp_wakeup(wl);
4764 ret = __wlcore_roc_completed(wl);
4766 wl1271_ps_elp_sleep(wl);
4768 mutex_unlock(&wl->mutex);
/*
 * Delayed work scheduled by remain_on_channel: end the ROC after its
 * duration elapses and notify mac80211 that the ROC period expired.
 */
4773 static void wlcore_roc_complete_work(struct work_struct *work)
4775 struct delayed_work *dwork;
4779 dwork = container_of(work, struct delayed_work, work);
4780 wl = container_of(dwork, struct wl1271, roc_complete_work);
4782 ret = wlcore_roc_completed(wl);
4784 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel op: flush TX, cancel the pending
 * roc_complete_work and complete the ROC synchronously.
 */
4787 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
4789 struct wl1271 *wl = hw->priv;
4791 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
4794 wl1271_tx_flush(wl);
4797 * we can't just flush_work here, because it might deadlock
4798 * (as we might get called from the same workqueue)
4800 cancel_delayed_work_sync(&wl->roc_complete_work);
4801 wlcore_roc_completed(wl);
/*
 * mac80211 tx_frames_pending op: true when frames are still queued in
 * the driver or still owned by the firmware.
 */
4806 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4808 struct wl1271 *wl = hw->priv;
4811 mutex_lock(&wl->mutex);
4813 if (unlikely(wl->state != WLCORE_STATE_ON))
4816 /* packets are considered pending if in the TX queue or the FW */
4817 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4819 mutex_unlock(&wl->mutex);
/*
 * Legacy 11b/g bitrate table (2.4 GHz).  hw_value/hw_value_short carry
 * the CONF_HW_BIT_RATE_* firmware rate bits; CCK rates above 1 Mbps
 * are flagged as short-preamble capable.
 * NOTE(review): the .bitrate members appear to be on lines dropped by
 * this extraction.
 */
4824 /* can't be const, mac80211 writes to this */
4825 static struct ieee80211_rate wl1271_rates[] = {
4827 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4828 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4830 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4831 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4832 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4834 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4835 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4836 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4838 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4839 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4840 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4842 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4843 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4845 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4846 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4848 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4849 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4851 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4852 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4854 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4855 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4857 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4858 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4860 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4861 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4863 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4864 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channel table (channels 1-14), max_power in dBm units. */
4867 /* can't be const, mac80211 writes to this */
4868 static struct ieee80211_channel wl1271_channels[] = {
4869 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4870 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4871 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4872 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4873 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4874 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4875 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4876 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4877 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4878 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4879 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4880 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4881 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4882 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
/* 2.4 GHz supported-band descriptor tying channels + rates together. */
4885 /* can't be const, mac80211 writes to this */
4886 static struct ieee80211_supported_band wl1271_band_2ghz = {
4887 .channels = wl1271_channels,
4888 .n_channels = ARRAY_SIZE(wl1271_channels),
4889 .bitrates = wl1271_rates,
4890 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/* OFDM-only rate table for the 5 GHz band (no CCK, no preamble flags). */
4893 /* 5 GHz data rates for WL1273 */
4894 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4896 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4897 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4899 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4900 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4902 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4903 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4905 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4906 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4908 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4909 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4911 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4912 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4914 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4915 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4917 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4918 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel table; hw_value is the 802.11 channel number. */
4921 /* 5 GHz band channels for WL1273 */
4922 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4923 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4924 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4925 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4926 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4927 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4928 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4929 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4930 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4931 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4932 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4933 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4934 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4935 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4936 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4937 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4938 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4939 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4940 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4941 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4942 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4943 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4944 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4945 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4946 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4947 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4948 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4949 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4950 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4951 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4952 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4953 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4954 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4955 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4956 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
/* 5 GHz supported-band descriptor tying channels + rates together. */
4959 static struct ieee80211_supported_band wl1271_band_5ghz = {
4960 .channels = wl1271_channels_5ghz,
4961 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4962 .bitrates = wl1271_rates_5ghz,
4963 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for wlcore -- the driver's entire mac80211
 * surface.  Handlers are the wl1271_op_* / wl12xx_op_* / wlcore_op_*
 * functions defined in this file.
 */
4966 static const struct ieee80211_ops wl1271_ops = {
4967 .start = wl1271_op_start,
4968 .stop = wlcore_op_stop,
4969 .add_interface = wl1271_op_add_interface,
4970 .remove_interface = wl1271_op_remove_interface,
4971 .change_interface = wl12xx_op_change_interface,
4973 .suspend = wl1271_op_suspend,
4974 .resume = wl1271_op_resume,
4976 .config = wl1271_op_config,
4977 .prepare_multicast = wl1271_op_prepare_multicast,
4978 .configure_filter = wl1271_op_configure_filter,
4980 .set_key = wlcore_op_set_key,
4981 .hw_scan = wl1271_op_hw_scan,
4982 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4983 .sched_scan_start = wl1271_op_sched_scan_start,
4984 .sched_scan_stop = wl1271_op_sched_scan_stop,
4985 .bss_info_changed = wl1271_op_bss_info_changed,
4986 .set_frag_threshold = wl1271_op_set_frag_threshold,
4987 .set_rts_threshold = wl1271_op_set_rts_threshold,
4988 .conf_tx = wl1271_op_conf_tx,
4989 .get_tsf = wl1271_op_get_tsf,
4990 .get_survey = wl1271_op_get_survey,
4991 .sta_state = wl12xx_op_sta_state,
4992 .ampdu_action = wl1271_op_ampdu_action,
4993 .tx_frames_pending = wl1271_tx_frames_pending,
4994 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4995 .channel_switch = wl12xx_op_channel_switch,
4996 .flush = wlcore_op_flush,
4997 .remain_on_channel = wlcore_op_remain_on_channel,
4998 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
4999 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate code to a mac80211 rate index via the
 * per-band lookup table; out-of-range or unsupported codes are logged
 * as errors.
 */
5003 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5009 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5010 wl1271_error("Illegal RX rate from HW: %d", rate);
5014 idx = wl->band_rate_to_idx[band][rate];
5015 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5016 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs 'bt_coex_state' read handler: print the current BT-coex
 * (soft-gemini) enable state plus a small usage legend.
 */
5023 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5024 struct device_attribute *attr,
5027 struct wl1271 *wl = dev_get_drvdata(dev);
5032 mutex_lock(&wl->mutex);
5033 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5035 mutex_unlock(&wl->mutex);
/*
 * sysfs 'bt_coex_state' write handler: parse a decimal 0/1, update
 * wl->sg_enabled and, when the device is on, push the new soft-gemini
 * setting to the firmware (with the usual wakeup/sleep bracket).
 */
5041 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5042 struct device_attribute *attr,
5043 const char *buf, size_t count)
5045 struct wl1271 *wl = dev_get_drvdata(dev);
5049 ret = kstrtoul(buf, 10, &res);
5051 wl1271_warning("incorrect value written to bt_coex_mode");
5055 mutex_lock(&wl->mutex);
/* no-op when the requested state matches the current one */
5059 if (res == wl->sg_enabled)
5062 wl->sg_enabled = res;
5064 if (unlikely(wl->state != WLCORE_STATE_ON))
5067 ret = wl1271_ps_elp_wakeup(wl);
5071 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5072 wl1271_ps_elp_sleep(wl);
5075 mutex_unlock(&wl->mutex);
/* root-writable, world-readable attribute backed by the two handlers */
5079 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5080 wl1271_sysfs_show_bt_coex_state,
5081 wl1271_sysfs_store_bt_coex_state);
/*
 * sysfs 'hw_pg_ver' read handler: print the chip's PG (production)
 * version, or "n/a" when it has not been read yet (negative value).
 */
5083 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5084 struct device_attribute *attr,
5087 struct wl1271 *wl = dev_get_drvdata(dev);
5092 mutex_lock(&wl->mutex);
5093 if (wl->hw_pg_ver >= 0)
5094 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5096 len = snprintf(buf, len, "n/a\n");
5097 mutex_unlock(&wl->mutex);
/* read-only attribute */
5102 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5103 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary 'fwlog' read handler.  Blocks (interruptibly, on
 * fwlog_waitq) until firmware log data is available, then copies up to
 * 'count' bytes out of wl->fwlog and compacts the remaining data to
 * the front of the buffer.  A negative fwlog_size signals the log was
 * invalidated (device being freed).  Seeking is not supported.
 */
5105 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5106 struct bin_attribute *bin_attr,
5107 char *buffer, loff_t pos, size_t count)
5109 struct device *dev = container_of(kobj, struct device, kobj);
5110 struct wl1271 *wl = dev_get_drvdata(dev);
5114 ret = mutex_lock_interruptible(&wl->mutex);
5116 return -ERESTARTSYS;
5118 /* Let only one thread read the log at a time, blocking others */
5119 while (wl->fwlog_size == 0) {
5122 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5124 TASK_INTERRUPTIBLE);
5126 if (wl->fwlog_size != 0) {
5127 finish_wait(&wl->fwlog_waitq, &wait);
/* drop the mutex while sleeping so the writer can make progress */
5131 mutex_unlock(&wl->mutex);
5134 finish_wait(&wl->fwlog_waitq, &wait);
5136 if (signal_pending(current))
5137 return -ERESTARTSYS;
5139 ret = mutex_lock_interruptible(&wl->mutex);
5141 return -ERESTARTSYS;
5144 /* Check if the fwlog is still valid */
5145 if (wl->fwlog_size < 0) {
5146 mutex_unlock(&wl->mutex);
5150 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5151 len = min(count, (size_t)wl->fwlog_size);
5152 wl->fwlog_size -= len;
5153 memcpy(buffer, wl->fwlog, len);
5155 /* Make room for new messages */
5156 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5158 mutex_unlock(&wl->mutex);
/* Root-readable binary sysfs file exposing the firmware log stream. */
5163 static struct bin_attribute fwlog_attr = {
5164 .attr = {.name = "fwlog", .mode = S_IRUSR},
5165 .read = wl1271_sysfs_read_fwlog,
/*
 * Delayed work fired when the firmware reports beacon loss: notify
 * mac80211 of connection loss for every associated STA vif.
 */
5168 static void wl1271_connection_loss_work(struct work_struct *work)
5170 struct delayed_work *dwork;
5172 struct ieee80211_vif *vif;
5173 struct wl12xx_vif *wlvif;
5175 dwork = container_of(work, struct delayed_work, work);
5176 wl = container_of(dwork, struct wl1271, connection_loss_work);
5178 wl1271_info("Connection loss work.");
5180 mutex_lock(&wl->mutex);
5182 if (unlikely(wl->state != WLCORE_STATE_ON))
5185 /* Call mac80211 connection loss */
5186 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5187 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5189 vif = wl12xx_wlvif_to_vif(wlvif);
5190 ieee80211_connection_loss(vif);
5193 mutex_unlock(&wl->mutex);
/*
 * Build wl->addresses from a 24-bit OUI + 24-bit NIC base address by
 * incrementing the NIC part per interface (increment lines appear to
 * be dropped by this extraction -- confirm upstream).  If the chip
 * supplies fewer addresses than WLCORE_NUM_MAC_ADDRESSES, the last
 * slot is cloned from the first with the locally-administered bit set.
 */
5196 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5200 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5203 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5204 wl1271_warning("NIC part of the MAC address wraps around!");
5206 for (i = 0; i < wl->num_mac_addr; i++) {
5207 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5208 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5209 wl->addresses[i].addr[2] = (u8) oui;
5210 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5211 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5212 wl->addresses[i].addr[5] = (u8) nic;
5216 /* we may be one address short at the most */
5217 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5220 * turn on the LAA bit in the first address and use it as
5223 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5224 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5225 memcpy(&wl->addresses[idx], &wl->addresses[0],
5226 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet region = locally administered address */
5228 wl->addresses[idx].addr[2] |= BIT(1);
5231 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5232 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on briefly to read its identity: chip ID, PG version
 * and (if the chip-family supports it) the fused MAC address.  The
 * chip is powered off again before returning.
 */
5235 static int wl12xx_get_hw_info(struct wl1271 *wl)
5239 ret = wl12xx_set_power_on(wl);
5243 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* start from a clean slate; get_mac may fill these from fuse */
5247 wl->fuse_oui_addr = 0;
5248 wl->fuse_nic_addr = 0;
5250 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5254 if (wl->ops->get_mac)
5255 ret = wl->ops->get_mac(wl);
5258 wl1271_power_off(wl);
/*
 * Register the device with mac80211 (idempotent).  The MAC address is
 * taken from the NVS blob when present; if the NVS address is all
 * zeroes, fall back to the fuse-derived OUI/NIC (fuse holds the
 * BD_ADDR, so the WLAN address starts one past it).
 */
5262 static int wl1271_register_hw(struct wl1271 *wl)
5265 u32 oui_addr = 0, nic_addr = 0;
5267 if (wl->mac80211_registered)
5270 if (wl->nvs_len >= 12) {
5271 /* NOTE: The wl->nvs->nvs element must be first, in
5272 * order to simplify the casting, we assume it is at
5273 * the beginning of the wl->nvs structure.
5275 u8 *nvs_ptr = (u8 *)wl->nvs;
5278 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5280 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5283 /* if the MAC address is zeroed in the NVS derive from fuse */
5284 if (oui_addr == 0 && nic_addr == 0) {
5285 oui_addr = wl->fuse_oui_addr;
5286 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5287 nic_addr = wl->fuse_nic_addr + 1;
5290 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5292 ret = ieee80211_register_hw(wl->hw);
5294 wl1271_error("unable to register mac80211 hw: %d", ret);
5298 wl->mac80211_registered = true;
5300 wl1271_debugfs_init(wl);
5302 wl1271_notice("loaded");
/* Stop PLT mode (if active) and unregister from mac80211. */
5308 static void wl1271_unregister_hw(struct wl1271 *wl)
5311 wl1271_plt_stop(wl);
5313 ieee80211_unregister_hw(wl->hw);
5314 wl->mac80211_registered = false;
/*
 * Allowed interface combinations advertised to cfg80211: STA
 * interfaces plus one AP/P2P interface, up to 3 interfaces total on a
 * single channel.
 */
5318 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5321 .types = BIT(NL80211_IFTYPE_STATION),
5325 .types = BIT(NL80211_IFTYPE_AP) |
5326 BIT(NL80211_IFTYPE_P2P_GO) |
5327 BIT(NL80211_IFTYPE_P2P_CLIENT),
5331 static const struct ieee80211_iface_combination
5332 wlcore_iface_combinations[] = {
5334 .num_different_channels = 1,
5335 .max_interfaces = 3,
5336 .limits = wlcore_iface_limits,
5337 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * One-time mac80211/cfg80211 capability setup: TX headroom, hw flags,
 * cipher suites, supported interface modes and combinations, scan
 * limits, band tables (local copies so per-device HT caps can be
 * patched in) and wowlan/probe-response offload advertisement.
 */
5341 static int wl1271_init_ieee80211(struct wl1271 *wl)
5343 static const u32 cipher_suites[] = {
5344 WLAN_CIPHER_SUITE_WEP40,
5345 WLAN_CIPHER_SUITE_WEP104,
5346 WLAN_CIPHER_SUITE_TKIP,
5347 WLAN_CIPHER_SUITE_CCMP,
5348 WL1271_CIPHER_SUITE_GEM,
5351 /* The tx descriptor buffer */
5352 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5354 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5355 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5358 /* FIXME: find a proper value */
5359 wl->hw->channel_change_time = 10000;
5360 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5362 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5363 IEEE80211_HW_SUPPORTS_PS |
5364 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5365 IEEE80211_HW_SUPPORTS_UAPSD |
5366 IEEE80211_HW_HAS_RATE_CONTROL |
5367 IEEE80211_HW_CONNECTION_MONITOR |
5368 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5369 IEEE80211_HW_SPECTRUM_MGMT |
5370 IEEE80211_HW_AP_LINK_PS |
5371 IEEE80211_HW_AMPDU_AGGREGATION |
5372 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5373 IEEE80211_HW_SCAN_WHILE_IDLE;
5375 wl->hw->wiphy->cipher_suites = cipher_suites;
5376 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5378 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5379 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5380 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5381 wl->hw->wiphy->max_scan_ssids = 1;
5382 wl->hw->wiphy->max_sched_scan_ssids = 16;
5383 wl->hw->wiphy->max_match_sets = 16;
5385 * Maximum length of elements in scanning probe request templates
5386 * should be the maximum length possible for a template, without
5387 * the IEEE80211 header of the template
5389 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5390 sizeof(struct ieee80211_header);
5392 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5393 sizeof(struct ieee80211_header);
5395 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5397 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5398 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5400 /* make sure all our channels fit in the scanned_ch bitmask */
5401 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5402 ARRAY_SIZE(wl1271_channels_5ghz) >
5403 WL1271_MAX_CHANNELS);
5405 * We keep local copies of the band structs because we need to
5406 * modify them on a per-device basis.
5408 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5409 sizeof(wl1271_band_2ghz));
5410 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5411 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5412 sizeof(*wl->ht_cap));
5413 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5414 sizeof(wl1271_band_5ghz));
5415 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5416 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5417 sizeof(*wl->ht_cap));
5419 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5420 &wl->bands[IEEE80211_BAND_2GHZ];
5421 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5422 &wl->bands[IEEE80211_BAND_5GHZ];
/* firmware does rate control; mac80211 needs only one rate slot */
5425 wl->hw->max_rates = 1;
5427 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5429 /* the FW answers probe-requests in AP-mode */
5430 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5431 wl->hw->wiphy->probe_resp_offload =
5432 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5433 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5434 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5436 /* allowed interface combinations */
5437 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5438 wl->hw->wiphy->n_iface_combinations =
5439 ARRAY_SIZE(wlcore_iface_combinations);
5441 SET_IEEE80211_DEV(wl->hw, wl->dev);
5443 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5444 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5446 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and initialize a wl1271 instance: the ieee80211_hw, the
 * chip-family private area (priv_size), per-link TX queues, all work
 * items, default state fields, the TX aggregation buffer, the dummy
 * packet, the FW-log page and the event mailbox.  Error paths unwind
 * in reverse allocation order (goto-cleanup labels are on lines
 * dropped by this extraction).  Returns ERR_PTR(ret) on failure.
 */
5451 #define WL1271_DEFAULT_CHANNEL 0
5453 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
5455 struct ieee80211_hw *hw;
5460 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5462 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5464 wl1271_error("could not alloc ieee80211_hw");
5470 memset(wl, 0, sizeof(*wl));
5472 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5474 wl1271_error("could not alloc wl priv");
5476 goto err_priv_alloc;
5479 INIT_LIST_HEAD(&wl->wlvif_list);
5483 for (i = 0; i < NUM_TX_QUEUES; i++)
5484 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5485 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5487 skb_queue_head_init(&wl->deferred_rx_queue);
5488 skb_queue_head_init(&wl->deferred_tx_queue);
5490 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5491 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5492 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5493 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5494 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5495 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5496 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5497 INIT_DELAYED_WORK(&wl->connection_loss_work,
5498 wl1271_connection_loss_work);
5500 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5501 if (!wl->freezable_wq) {
/* default runtime state before firmware boot */
5506 wl->channel = WL1271_DEFAULT_CHANNEL;
5508 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5509 wl->band = IEEE80211_BAND_2GHZ;
5510 wl->channel_type = NL80211_CHAN_NO_HT;
5512 wl->sg_enabled = true;
5513 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5516 wl->ap_fw_ps_map = 0;
5518 wl->platform_quirks = 0;
5519 wl->sched_scanning = false;
5520 wl->system_hlid = WL12XX_SYSTEM_HLID;
5521 wl->active_sta_count = 0;
5523 init_waitqueue_head(&wl->fwlog_waitq);
5525 /* The system link is always allocated */
5526 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5528 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5529 for (i = 0; i < wl->num_tx_desc; i++)
5530 wl->tx_frames[i] = NULL;
5532 spin_lock_init(&wl->wl_lock);
5534 wl->state = WLCORE_STATE_OFF;
5535 wl->fw_type = WL12XX_FW_TYPE_NONE;
5536 mutex_init(&wl->mutex);
5537 mutex_init(&wl->flush_mutex);
5538 init_completion(&wl->nvs_loading_complete);
5540 order = get_order(aggr_buf_size);
5541 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5542 if (!wl->aggr_buf) {
5546 wl->aggr_buf_size = aggr_buf_size;
5548 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5549 if (!wl->dummy_packet) {
5554 /* Allocate one page for the FW log */
5555 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5558 goto err_dummy_packet;
5561 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
/* error unwind: free in reverse order of allocation */
5570 free_page((unsigned long)wl->fwlog);
5573 dev_kfree_skb(wl->dummy_packet);
5576 free_pages((unsigned long)wl->aggr_buf, order);
5579 destroy_workqueue(wl->freezable_wq);
5582 wl1271_debugfs_exit(wl);
5586 ieee80211_free_hw(hw);
5590 return ERR_PTR(ret);
/*
 * Tear down everything wlcore_alloc_hw (and later init) created:
 * unblock any sleeping fwlog readers by invalidating the log, remove
 * the sysfs files, free buffers/status structures and the workqueue,
 * then release the ieee80211_hw itself.
 */
5594 int wlcore_free_hw(struct wl1271 *wl)
5596 /* Unblock any fwlog readers */
5597 mutex_lock(&wl->mutex);
/* negative size tells wl1271_sysfs_read_fwlog the log is gone */
5598 wl->fwlog_size = -1;
5599 wake_up_interruptible_all(&wl->fwlog_waitq);
5600 mutex_unlock(&wl->mutex);
5602 device_remove_bin_file(wl->dev, &fwlog_attr);
5604 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5606 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5607 free_page((unsigned long)wl->fwlog);
5608 dev_kfree_skb(wl->dummy_packet);
5609 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5611 wl1271_debugfs_exit(wl);
5615 wl->fw_type = WL12XX_FW_TYPE_NONE;
5619 kfree(wl->fw_status_1);
5620 kfree(wl->tx_res_if);
5621 destroy_workqueue(wl->freezable_wq);
5624 ieee80211_free_hw(wl->hw);
/*
 * Hard IRQ handler (top half).  Completes any pending ELP wakeup
 * waiter, and when the system is suspended defers the interrupt work:
 * marks it pending, disables the IRQ and raises a PM wakeup event
 * instead of waking the thread.  Otherwise returns IRQ_WAKE_THREAD so
 * wlcore_irq runs in thread context.
 */
5630 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5632 struct wl1271 *wl = cookie;
5633 unsigned long flags;
5635 wl1271_debug(DEBUG_IRQ, "IRQ");
5637 /* complete the ELP completion */
5638 spin_lock_irqsave(&wl->wl_lock, flags);
5639 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5640 if (wl->elp_compl) {
5641 complete(wl->elp_compl);
5642 wl->elp_compl = NULL;
5645 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5646 /* don't enqueue a work right now. mark it as pending */
5647 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5648 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5649 disable_irq_nosync(wl->irq);
5650 pm_wakeup_event(wl->dev, 0);
5651 spin_unlock_irqrestore(&wl->wl_lock, flags);
5654 spin_unlock_irqrestore(&wl->wl_lock, flags);
5656 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - completion callback for the asynchronous NVS request.
 *
 * Invoked by request_firmware_nowait() (scheduled from wlcore_probe)
 * once the NVS calibration file load has finished (fw may be NULL on
 * failure; that branch is only partially visible here).  Performs the
 * bulk of device bring-up: chip setup, IRQ registration, wakeup/WoWLAN
 * configuration, hw identification, mac80211 registration and sysfs
 * file creation.  On any failure it unwinds via the goto-cleanup labels
 * at the bottom; always releases the firmware and completes
 * nvs_loading_complete so wlcore_remove() cannot block forever.
 */
5659 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5661 struct wl1271 *wl = context;
5662 struct platform_device *pdev = wl->pdev;
5663 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5664 unsigned long irqflags;
/* Keep a private copy of the NVS blob; fw is released at the end. */
5668 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5670 wl1271_error("Could not allocate nvs data");
5673 wl->nvs_len = fw->size;
/* NVS file missing is not fatal — the driver can run without it. */
5675 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
/* Chip-family-specific setup hook (wl12xx/wl18xx ops). */
5681 ret = wl->ops->setup(wl);
/* Sanity: the per-chip descriptor count must fit the core's arrays. */
5685 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5687 /* adjust some runtime configuration parameters */
5688 wlcore_adjust_conf(wl);
/* Pull IRQ number and board-specific hooks from platform data. */
5690 wl->irq = platform_get_irq(pdev, 0);
5691 wl->platform_quirks = pdata->platform_quirks;
5692 wl->set_power = pdata->set_power;
5693 wl->if_ops = pdata->ops;
/*
 * Boards wired for edge-triggered interrupts use a rising edge;
 * otherwise level-high with ONESHOT (IRQ stays masked until the
 * threaded handler finishes).
 */
5695 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5696 irqflags = IRQF_TRIGGER_RISING;
5698 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
/* Hard half wl12xx_hardirq + threaded half wlcore_irq. */
5700 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5704 wl1271_error("request_irq() failed: %d", ret);
/* Allow the WLAN IRQ to wake the system; advertise WoWLAN if the
 * board keeps power in suspend. */
5709 ret = enable_irq_wake(wl->irq);
5711 wl->irq_wake_enabled = true;
5712 device_init_wakeup(wl->dev, 1);
5713 if (pdata->pwr_in_suspend) {
5714 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5715 wl->hw->wiphy->wowlan.n_patterns =
5716 WL1271_MAX_RX_FILTERS;
5717 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5718 wl->hw->wiphy->wowlan.pattern_max_len =
5719 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* Keep the IRQ masked until the interface is actually started. */
5723 disable_irq(wl->irq);
5725 ret = wl12xx_get_hw_info(wl);
5727 wl1271_error("couldn't get hw info");
5731 ret = wl->ops->identify_chip(wl);
5735 ret = wl1271_init_ieee80211(wl);
5739 ret = wl1271_register_hw(wl);
5743 /* Create sysfs file to control bt coex state */
5744 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5746 wl1271_error("failed to create sysfs file bt_coex_state");
5750 /* Create sysfs file to get HW PG version */
5751 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5753 wl1271_error("failed to create sysfs file hw_pg_ver");
5754 goto out_bt_coex_state;
5757 /* Create sysfs file for the FW log */
5758 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5760 wl1271_error("failed to create sysfs file fwlog");
/* Checked by wlcore_remove() to decide whether teardown is needed. */
5764 wl->initialized = true;
/* Error unwind: undo in reverse order of creation (labels between the
 * visible lines are elided in this excerpt). */
5768 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5771 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5774 wl1271_unregister_hw(wl);
5777 free_irq(wl->irq, wl);
/* Always executed: drop the fw reference and release anyone waiting in
 * wlcore_remove() on nvs_loading_complete. */
5783 release_firmware(fw);
5784 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry for wlcore-based platform drivers.
 *
 * Validates that the chip-specific driver filled in its ops/ptable,
 * binds @wl to the platform device, and kicks off an asynchronous load
 * of the NVS calibration file; the real initialization continues in
 * wlcore_nvs_cb() when the firmware request completes.
 * Returns 0 on success or a negative errno.
 */
5787 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
/* The lower driver (wl12xx/wl18xx) must have provided its hooks. */
5791 if (!wl->ops || !wl->ptable)
5794 wl->dev = &pdev->dev;
5796 platform_set_drvdata(pdev, wl);
/* Async request: wlcore_nvs_cb(fw, wl) runs when the NVS is loaded. */
5798 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5799 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5802 wl1271_error("request_firmware_nowait failed: %d", ret);
/* On failure the callback will never run, so complete the completion
 * here ourselves to keep wlcore_remove() from blocking. */
5803 complete_all(&wl->nvs_loading_complete);
5808 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * wlcore_remove - common remove entry for wlcore-based platform drivers.
 *
 * Waits for the asynchronous NVS load (and thus wlcore_nvs_cb) to
 * finish before tearing anything down; if initialization never
 * completed there is nothing to undo.  Reverses the wakeup/IRQ/hw
 * registration done in wlcore_nvs_cb().
 */
5810 int __devexit wlcore_remove(struct platform_device *pdev)
5812 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Serialize against wlcore_nvs_cb() — it completes this either on
 * success, on error, or when the firmware request fails to start. */
5814 wait_for_completion(&wl->nvs_loading_complete);
5815 if (!wl->initialized)
/* Undo enable_irq_wake()/device_init_wakeup() from wlcore_nvs_cb(). */
5818 if (wl->irq_wake_enabled) {
5819 device_init_wakeup(wl->dev, 0);
5820 disable_irq_wake(wl->irq);
5822 wl1271_unregister_hw(wl);
5823 free_irq(wl->irq, wl);
5828 EXPORT_SYMBOL_GPL(wlcore_remove);
/*
 * Module parameters and metadata.
 *
 * debug_level is exported so the chip-specific modules (wl12xx/wl18xx)
 * share one runtime-tunable debug mask with the core.
 */
5830 u32 wl12xx_debug_level = DEBUG_NONE;
5831 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5832 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5833 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* FW logger mode; parsed elsewhere (fwlog_param declared near the top
 * of the file).  Perms 0: load-time only, not visible in sysfs. */
5835 module_param_named(fwlog, fwlog_param, charp, 0);
5836 MODULE_PARM_DESC(fwlog,
5837 "FW logger options: continuous, ondemand, dbgpins or disable");
/* Debug aids for firmware recovery behavior. */
5839 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5840 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5842 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5843 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5845 MODULE_LICENSE("GPL");
5846 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5847 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
/* Declares the NVS file so userspace tooling can bundle it. */
5848 MODULE_FIRMWARE(WL12XX_NVS_NAME);