3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/*
 * Number of times to retry booting/initializing the firmware before
 * giving up (used by wl1271_plt_start() and friends).
 *
 * Fix: the macro was defined twice with identical bodies; an identical
 * redefinition is legal C but redundant, so keep a single definition.
 */
#define WL1271_BOOT_RETRIES 3
/*
 * Module parameters: firmware-log mode string (parsed in
 * wlcore_adjust_conf()) and recovery-behaviour flags.
 */
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
/* Forward declarations for helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wl1271_op_stop(struct ieee80211_hw *hw);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware that the STA peer is now authorized.  Only valid for
 * a STA-type vif that is currently associated; the STA_STATE_SENT bit
 * ensures the peer-state command is issued at most once per association.
 * Also cancels the remain-on-channel for this role and logs completion.
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
86 wl12xx_croc(wl, wlvif->role_id);
88 wl1271_info("Association completed.");
/*
 * Regulatory-domain change notifier: walk the 5 GHz band and force any
 * radar-flagged channel to passive scan / no-IBSS, since the device
 * cannot initiate radiation there.  Disabled channels are skipped.
 */
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/*
 * Enable or disable PS RX streaming in the firmware and mirror the new
 * state in the vif's RX_STREAMING_STARTED flag.  Caller holds wl->mutex.
 */
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
133 * this function is being called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
139 int period = wl->conf.rx_streaming.interval;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* stop the inactivity timer when streaming was just disabled */
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable RX streaming for an associated STA vif, then arm a
 * timer that turns it off again after the configured inactivity period.
 * Bails out if streaming is already started, the vif is not associated,
 * or the config/Soft-Gemini conditions do not call for streaming.
 */
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
167 mutex_lock(&wl->mutex);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
175 if (!wl->conf.rx_streaming.interval)
178 ret = wl1271_ps_elp_wakeup(wl);
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
191 wl1271_ps_elp_sleep(wl);
193 mutex_unlock(&wl->mutex);
/*
 * Work item: disable RX streaming (the counterpart of the enable work),
 * waking the chip from ELP around the firmware command.
 */
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
203 mutex_lock(&wl->mutex);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
208 ret = wl1271_ps_elp_wakeup(wl);
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
217 wl1271_ps_elp_sleep(wl);
219 mutex_unlock(&wl->mutex);
/*
 * Inactivity-timer callback: cannot sleep here, so defer the actual
 * RX-streaming disable to the work queue.
 */
222 static void wl1271_rx_streaming_timer(unsigned long data)
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
229 /* wl->mutex must be taken */
/*
 * Re-arm the Tx watchdog: restart the delayed work for another
 * tx_watchdog_timeout period, but only while blocks are still
 * allocated in the firmware (i.e. Tx is outstanding).
 */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires when no Tx completion has been seen for
 * tx_watchdog_timeout ms.  Benign causes (ROC in progress, ongoing scan,
 * AP buffering for sleeping stations) just re-arm the timer; otherwise
 * the firmware is assumed stuck and a recovery is queued.
 */
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 struct delayed_work *dwork;
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 mutex_lock(&wl->mutex);
251 if (unlikely(wl->state == WL1271_STATE_OFF))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found: treat the FW as stuck and recover */
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
300 mutex_unlock(&wl->mutex);
/*
 * Apply the "fwlog" module parameter to the driver configuration.
 * Recognized values: continuous, ondemand, dbgpins, disable; anything
 * else is reported as an error and leaves the config untouched.
 */
303 static void wlcore_adjust_conf(struct wl1271 *wl)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/*
 * Regulate high-level PS for one AP link based on the FW PS bitmap and
 * the number of packets queued in FW for that link: end PS when the STA
 * is awake or has few packets, start PS when it sleeps with many packets
 * queued and it is not the only connected station.
 */
323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
324 struct wl12xx_vif *wlvif,
327 bool fw_ps, single_sta;
329 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
330 single_sta = (wl->active_sta_count == 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
337 wl12xx_ps_link_end(wl, wlvif, hlid);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
345 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Per-AP-vif link bookkeeping driven by the FW status block: track the
 * FW PS bitmap, update each link's allocated-packet count from the
 * freed-packet counters, and regulate per-link PS accordingly.
 */
348 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
349 struct wl12xx_vif *wlvif,
350 struct wl_fw_status_2 *status)
352 struct wl1271_link *lnk;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
359 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
360 wl1271_debug(DEBUG_PSM,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl->ap_fw_ps_map, cur_fw_ps_map,
363 wl->ap_fw_ps_map ^ cur_fw_ps_map);
365 wl->ap_fw_ps_map = cur_fw_ps_map;
368 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
369 lnk = &wl->links[hlid];
/* delta of freed packets since the last status read */
370 cnt = status->counters.tx_lnk_free_pkts[hlid] -
371 lnk->prev_freed_pkts;
373 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
374 lnk->allocated_pkts -= cnt;
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 lnk->allocated_pkts);
/*
 * Read the firmware status block and update driver Tx accounting:
 * per-queue allocated packets, total freed/available blocks (with
 * wrap-around handling), the Tx watchdog, AP link status, and the
 * host-chipset time offset.
 */
381 static void wl12xx_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
392 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
393 sizeof(*status_2) + wl->fw_status_priv_len;
395 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
398 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status_1->fw_rx_counter,
402 status_1->drv_rx_counter,
403 status_1->tx_results_counter);
405 for (i = 0; i < NUM_TX_QUEUES; i++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl->tx_allocated_pkts[i] -=
408 (status_2->counters.tx_released_pkts[i] -
409 wl->tx_pkts_freed[i]) & 0xff;
411 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
414 /* prevent wrap-around in total blocks counter */
415 if (likely(wl->tx_blocks_freed <=
416 le32_to_cpu(status_2->total_released_blks)))
417 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: account for the modulo distance */
420 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
421 le32_to_cpu(status_2->total_released_blks);
423 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
425 wl->tx_allocated_blocks -= freed_blocks;
428 * If the FW freed some blocks:
429 * If we still have allocated blocks - re-arm the timer, Tx is
430 * not stuck. Otherwise, cancel the timer (no Tx currently).
433 if (wl->tx_allocated_blocks)
434 wl12xx_rearm_tx_watchdog_locked(wl);
436 cancel_delayed_work(&wl->tx_watchdog_work);
439 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
442 * The FW might change the total number of TX memblocks before
443 * we get a notification about blocks being released. Thus, the
444 * available blocks calculation might yield a temporary result
445 * which is lower than the actual available blocks. Keeping in
446 * mind that only blocks that were allocated can be moved from
447 * TX to RX, tx_blocks_available should never decrease here.
449 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
452 /* if more blocks are available now, tx work can be scheduled */
453 if (wl->tx_blocks_available > old_tx_blk_count)
454 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
456 /* for AP update num of allocated TX blocks per link and ps status */
457 wl12xx_for_each_wlvif_ap(wl, wlvif) {
458 wl12xx_irq_update_links_status(wl, wlvif, status_2);
461 /* update the host-chipset time offset */
463 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
464 (s64)le32_to_cpu(status_2->fw_localtime);
/*
 * Drain both deferred queues: hand received frames and completed Tx
 * skbs back to mac80211 (non-IRQ "ni" variants - process context).
 */
467 static void wl1271_flush_deferred_work(struct wl1271 *wl)
471 /* Pass all received frames to the network stack */
472 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
473 ieee80211_rx_ni(wl->hw, skb);
475 /* Return sent skbs to the network stack */
476 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
477 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item: repeatedly flush the deferred queues until no more RX
 * frames are pending.
 */
480 static void wl1271_netstack_work(struct work_struct *work)
483 container_of(work, struct wl1271, netstack_work);
486 wl1271_flush_deferred_work(wl);
487 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ servicing iterations per invocation. */
490 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded IRQ handler: read FW status in a loop, dispatching on the
 * pending interrupt bits (watchdog -> recovery; data -> RX/Tx handling;
 * event A/B -> event mailbox).  Holds wl->mutex for the duration and
 * wakes the chip from ELP around the processing.
 */
492 static irqreturn_t wl1271_irq(int irq, void *cookie)
496 int loopcount = WL1271_IRQ_MAX_LOOPS;
497 struct wl1271 *wl = (struct wl1271 *)cookie;
499 unsigned int defer_count;
502 /* TX might be handled here, avoid redundant work */
503 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
504 cancel_work_sync(&wl->tx_work);
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
513 mutex_lock(&wl->mutex);
515 wl1271_debug(DEBUG_IRQ, "IRQ work");
517 if (unlikely(wl->state == WL1271_STATE_OFF))
520 ret = wl1271_ps_elp_wakeup(wl);
524 while (!done && loopcount--) {
526 * In order to avoid a race with the hardirq, clear the flag
527 * before acknowledging the chip. Since the mutex is held,
528 * wl1271_ps_elp_wakeup cannot be called concurrently.
530 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
531 smp_mb__after_clear_bit();
533 wl12xx_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
535 wlcore_hw_tx_immediate_compl(wl);
537 intr = le32_to_cpu(wl->fw_status_1->intr);
538 intr &= WL1271_INTR_MASK;
544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
545 wl1271_error("watchdog interrupt received! "
546 "starting recovery.");
547 wl12xx_queue_recovery_work(wl);
549 /* restarting the chip. ignore any other interrupt. */
553 if (likely(intr & WL1271_ACX_INTR_DATA)) {
554 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
556 wl12xx_rx(wl, wl->fw_status_1);
558 /* Check if any tx blocks were freed */
559 spin_lock_irqsave(&wl->wl_lock, flags);
560 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
561 wl1271_tx_total_queue_count(wl) > 0) {
562 spin_unlock_irqrestore(&wl->wl_lock, flags);
564 * In order to avoid starvation of the TX path,
565 * call the work function directly.
567 wl1271_tx_work_locked(wl);
569 spin_unlock_irqrestore(&wl->wl_lock, flags);
572 /* check for tx results */
573 wlcore_hw_tx_delayed_compl(wl);
575 /* Make sure the deferred queues don't get too long */
576 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
577 skb_queue_len(&wl->deferred_rx_queue);
578 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
579 wl1271_flush_deferred_work(wl);
582 if (intr & WL1271_ACX_INTR_EVENT_A) {
583 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
584 wl1271_event_handle(wl, 0);
587 if (intr & WL1271_ACX_INTR_EVENT_B) {
588 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
589 wl1271_event_handle(wl, 1);
592 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
593 wl1271_debug(DEBUG_IRQ,
594 "WL1271_ACX_INTR_INIT_COMPLETE");
596 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
597 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
600 wl1271_ps_elp_sleep(wl);
603 spin_lock_irqsave(&wl->wl_lock, flags);
604 /* In case TX was not handled here, queue TX work */
605 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
606 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
607 wl1271_tx_total_queue_count(wl) > 0)
608 ieee80211_queue_work(wl->hw, &wl->tx_work);
609 spin_unlock_irqrestore(&wl->wl_lock, flags);
611 mutex_unlock(&wl->mutex);
/*
 * Helper state and iterator for counting active interfaces; used to
 * decide between the single-role and multi-role firmware variants.
 */
616 struct vif_counter_data {
619 struct ieee80211_vif *cur_vif;
/* set if cur_vif was seen among the active interfaces */
620 bool cur_vif_running;
/* Iterator callback: note whether the vif of interest is running. */
623 static void wl12xx_vif_count_iter(void *data, u8 *mac,
624 struct ieee80211_vif *vif)
626 struct vif_counter_data *counter = data;
629 if (counter->cur_vif == vif)
630 counter->cur_vif_running = true;
633 /* caller must not hold wl->mutex, as it might deadlock */
634 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
635 struct ieee80211_vif *cur_vif,
636 struct vif_counter_data *data)
638 memset(data, 0, sizeof(*data));
639 data->cur_vif = cur_vif;
641 ieee80211_iterate_active_interfaces(hw,
642 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role or
 * single-role based on the cached vif count), validate its size, and
 * copy it into a vmalloc'ed buffer at wl->fw.  Skips the load if the
 * right firmware type is already cached.
 */
645 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
647 const struct firmware *fw;
649 enum wl12xx_fw_type fw_type;
653 fw_type = WL12XX_FW_TYPE_PLT;
654 fw_name = wl->plt_fw_name;
657 * we can't call wl12xx_get_vif_count() here because
658 * wl->mutex is taken, so use the cached last_vif_count value
660 if (wl->last_vif_count > 1) {
661 fw_type = WL12XX_FW_TYPE_MULTI;
662 fw_name = wl->mr_fw_name;
664 fw_type = WL12XX_FW_TYPE_NORMAL;
665 fw_name = wl->sr_fw_name;
669 if (wl->fw_type == fw_type)
672 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
674 ret = request_firmware(&fw, fw_name, wl->dev);
677 wl1271_error("could not get firmware %s: %d", fw_name, ret);
682 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the type until the copy below succeeds */
689 wl->fw_type = WL12XX_FW_TYPE_NONE;
690 wl->fw_len = fw->size;
691 wl->fw = vmalloc(wl->fw_len);
694 wl1271_error("could not allocate memory for the firmware");
699 memcpy(wl->fw, fw->data, wl->fw_len);
701 wl->fw_type = fw_type;
703 release_firmware(fw);
/*
 * Load the NVS (calibration/configuration) file from the filesystem and
 * keep a kmemdup'ed copy in wl->nvs / wl->nvs_len.
 */
708 static int wl1271_fetch_nvs(struct wl1271 *wl)
710 const struct firmware *fw;
713 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
716 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
721 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
724 wl1271_error("could not allocate memory for the nvs file");
729 wl->nvs_len = fw->size;
732 release_firmware(fw);
/* Queue a hardware recovery, unless one is already in progress. */
737 void wl12xx_queue_recovery_work(struct wl1271 *wl)
739 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
740 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one memory block of FW log data to wl->fwlog.  The log is a
 * sequence of length-prefixed records; scan to find the used length
 * (stopping at a zero length byte or a record that would overrun
 * maxlen), clamp to the remaining room in the PAGE_SIZE log buffer,
 * then copy.
 */
743 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
747 /* The FW log is a length-value list, find where the log end */
748 while (len < maxlen) {
749 if (memblock[len] == 0)
751 if (len + memblock[len] + 1 > maxlen)
753 len += memblock[len] + 1;
756 /* Make sure we have enough room */
757 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
759 /* Fill the FW log file, consumed by the sysfs fwlog entry */
760 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
761 wl->fwlog_size += len;
/*
 * On firmware panic, dump the on-demand FW log: stop the logger, then
 * walk the linked list of HW memory blocks (each block's first 4 bytes
 * point to the next; the last points back to the first), copying each
 * block's payload into the host-side log buffer, and finally wake any
 * sysfs readers.  No-op if the fwlog feature is absent or disabled.
 */
766 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
772 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
773 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
774 (wl->conf.fwlog.mem_blocks == 0))
777 wl1271_info("Reading FW panic log");
779 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
784 * Make sure the chip is awake and the logger isn't active.
785 * This might fail if the firmware hanged.
787 if (!wl1271_ps_elp_wakeup(wl))
788 wl12xx_cmd_stop_fwlog(wl);
790 /* Read the first memory block address */
791 wl12xx_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
792 first_addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
796 /* Traverse the memory blocks linked list */
799 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
800 wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
804 * Memory blocks are linked to one another. The first 4 bytes
805 * of each memory block hold the hardware address of the next
806 * one. The last memory block points to the first one.
808 addr = le32_to_cpup((__le32 *)block);
809 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
810 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
812 } while (addr && (addr != first_addr));
/* notify blocked sysfs readers that new log data is available */
814 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Full hardware recovery: dump the FW panic log and PC, optionally BUG
 * (bug_on_recovery) or bail out (no_recovery), advance Tx security
 * sequence numbers, stop queues, tear down all vifs, power-cycle via
 * op_stop and ask mac80211 to restart the hardware.
 */
820 static void wl1271_recovery_work(struct work_struct *work)
823 container_of(work, struct wl1271, recovery_work);
824 struct wl12xx_vif *wlvif;
825 struct ieee80211_vif *vif;
827 mutex_lock(&wl->mutex);
829 if (wl->state != WL1271_STATE_ON || wl->plt)
832 /* Avoid a recursive recovery */
833 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
835 wl12xx_read_fwlog_panic(wl);
837 /* change partitions momentarily so we can read the FW pc */
838 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
839 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x "
842 wlcore_read_reg(wl, REG_PC_ON_RECOVERY),
843 wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR),
844 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
846 BUG_ON(bug_on_recovery &&
847 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
850 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
851 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
856 * Advance security sequence number to overcome potential progress
857 * in the firmware during recovery. This doesn't hurt if the network is
860 wl12xx_for_each_wlvif(wl, wlvif) {
861 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
862 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
863 wlvif->tx_security_seq +=
864 WL1271_TX_SQN_POST_RECOVERY_PADDING;
867 /* Prevent spurious TX during FW restart */
868 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
870 if (wl->sched_scanning) {
871 ieee80211_sched_scan_stopped(wl->hw);
872 wl->sched_scanning = false;
875 /* reboot the chipset */
876 while (!list_empty(&wl->wlvif_list)) {
877 wlvif = list_first_entry(&wl->wlvif_list,
878 struct wl12xx_vif, list);
879 vif = wl12xx_wlvif_to_vif(wlvif);
880 __wl1271_op_remove_interface(wl, vif, false);
882 mutex_unlock(&wl->mutex);
883 wl1271_op_stop(wl->hw);
885 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
887 ieee80211_restart_hw(wl->hw);
890 * Its safe to enable TX now - the queues are stopped after a request
893 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
896 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing the ELP-control register. */
899 static void wl1271_fw_wakeup(struct wl1271 *wl)
901 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW-status buffer (status_1 and status_2 live in one
 * allocation; fw_status_2 points just past the status_1 region) and the
 * Tx-result interface buffer.  Frees fw_status_1 if the second
 * allocation fails.
 */
904 static int wl1271_setup(struct wl1271 *wl)
906 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
907 sizeof(*wl->fw_status_2) +
908 wl->fw_status_priv_len, GFP_KERNEL);
909 if (!wl->fw_status_1)
912 wl->fw_status_2 = (struct wl_fw_status_2 *)
913 (((u8 *) wl->fw_status_1) +
914 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
916 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
917 if (!wl->tx_res_if) {
918 kfree(wl->fw_status_1);
/*
 * Power the chip on (with the required pre/post delays), select the
 * boot partition and wake the firmware from ELP.
 */
925 static int wl12xx_set_power_on(struct wl1271 *wl)
929 msleep(WL1271_PRE_POWER_ON_SLEEP);
930 ret = wl1271_power_on(wl);
933 msleep(WL1271_POWER_ON_SLEEP);
937 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
939 /* ELP module wake up */
940 wl1271_fw_wakeup(wl);
/*
 * Bring the chip up far enough to boot: power on, configure the bus
 * block size (dropping the blocksize-align quirk if unsupported),
 * allocate driver buffers, fetch the right firmware image and, if no
 * NVS was provided via netlink, load it from the filesystem.
 */
946 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
950 ret = wl12xx_set_power_on(wl);
955 * For wl127x based devices we could use the default block
956 * size (512 bytes), but due to a bug in the sdio driver, we
957 * need to set it explicitly after the chip is powered on. To
958 * simplify the code and since the performance impact is
959 * negligible, we use the same block size for all different
962 * Check if the bus supports blocksize alignment and, if it
963 * doesn't, make sure we don't have the quirk.
965 if (!wl1271_set_block_size(wl))
966 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
968 /* TODO: make sure the lower driver has set things up correctly */
970 ret = wl1271_setup(wl);
974 ret = wl12xx_fetch_firmware(wl, plt);
978 /* No NVS from netlink, try to get it from the filesystem */
979 if (wl->nvs == NULL) {
980 ret = wl1271_fetch_nvs(wl);
/*
 * Enter PLT (Production Line Testing) mode: only legal from the OFF
 * state.  Retries chip wakeup + plt_init up to WL1271_BOOT_RETRIES
 * times, powering off between attempts; on success publishes hw/fw
 * version info to the wiphy.
 */
989 int wl1271_plt_start(struct wl1271 *wl)
991 int retries = WL1271_BOOT_RETRIES;
992 struct wiphy *wiphy = wl->hw->wiphy;
995 mutex_lock(&wl->mutex);
997 wl1271_notice("power up");
999 if (wl->state != WL1271_STATE_OFF) {
1000 wl1271_error("cannot go into PLT state because not "
1001 "in off state: %d", wl->state);
1008 ret = wl12xx_chip_wakeup(wl, true);
1012 ret = wl->ops->plt_init(wl);
1017 wl->state = WL1271_STATE_ON;
1018 wl1271_notice("firmware booted in PLT mode (%s)",
1019 wl->chip.fw_ver_str);
1021 /* update hw/fw version info in wiphy struct */
1022 wiphy->hw_version = wl->chip.id;
1023 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1024 sizeof(wiphy->fw_version));
/* boot attempt failed: power off before retrying */
1029 wl1271_power_off(wl);
1032 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1033 WL1271_BOOT_RETRIES);
1035 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before checking state (so the IRQ
 * handler can't run with a half-torn-down device), cancel all pending
 * work, then power off and mark the device OFF.
 */
1040 int wl1271_plt_stop(struct wl1271 *wl)
1044 wl1271_notice("power down");
1047 * Interrupts must be disabled before setting the state to OFF.
1048 * Otherwise, the interrupt handler might be called and exit without
1049 * reading the interrupt status.
1051 wlcore_disable_interrupts(wl);
1052 mutex_lock(&wl->mutex);
1054 mutex_unlock(&wl->mutex);
1057 * This will not necessarily enable interrupts as interrupts
1058 * may have been disabled when op_stop was called. It will,
1059 * however, balance the above call to disable_interrupts().
1061 wlcore_enable_interrupts(wl);
1063 wl1271_error("cannot power down because not in PLT "
1064 "state: %d", wl->state);
1069 mutex_unlock(&wl->mutex);
1071 wl1271_flush_deferred_work(wl);
1072 cancel_work_sync(&wl->netstack_work);
1073 cancel_work_sync(&wl->recovery_work);
1074 cancel_delayed_work_sync(&wl->elp_work);
1075 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1076 cancel_delayed_work_sync(&wl->connection_loss_work);
1078 mutex_lock(&wl->mutex);
1079 wl1271_power_off(wl);
1081 wl->state = WL1271_STATE_OFF;
1084 mutex_unlock(&wl->mutex);
/*
 * mac80211 Tx entry point: map the skb to a link (hlid) and AC queue,
 * drop it if the link is invalid or the queue is hard-stopped, otherwise
 * enqueue it on the per-link queue, apply the high-watermark soft stop,
 * and kick the Tx work unless Tx is busy or already pending.
 */
1090 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1092 struct wl1271 *wl = hw->priv;
1093 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1094 struct ieee80211_vif *vif = info->control.vif;
1095 struct wl12xx_vif *wlvif = NULL;
1096 unsigned long flags;
1101 wlvif = wl12xx_vif_to_data(vif);
1103 mapping = skb_get_queue_mapping(skb);
1104 q = wl1271_tx_get_queue(mapping);
1106 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1108 spin_lock_irqsave(&wl->wl_lock, flags);
1111 * drop the packet if the link is invalid or the queue is stopped
1112 * for any reason but watermark. Watermark is a "soft"-stop so we
1113 * allow these packets through.
1115 if (hlid == WL12XX_INVALID_LINK_ID ||
1116 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1117 (wlcore_is_queue_stopped(wl, q) &&
1118 !wlcore_is_queue_stopped_by_reason(wl, q,
1119 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1120 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1121 ieee80211_free_txskb(hw, skb);
1125 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1127 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1129 wl->tx_queue_count[q]++;
1132 * The workqueue is slow to process the tx_queue and we need stop
1133 * the queue here, otherwise the queue will get too long.
1135 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
1136 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1137 wlcore_stop_queue_locked(wl, q,
1138 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1142 * The chip specific setup must run before the first TX packet -
1143 * before that, the tx_work will not be initialized!
1146 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1147 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1148 ieee80211_queue_work(wl->hw, &wl->tx_work);
1151 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet (requested by the firmware when
 * it is low on RX memory blocks).  Only one may be pending at a time;
 * Tx work runs immediately unless the FW Tx path is busy.
 */
1154 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1156 unsigned long flags;
1159 /* no need to queue a new dummy packet if one is already pending */
1160 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1163 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1165 spin_lock_irqsave(&wl->wl_lock, flags);
1166 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1167 wl->tx_queue_count[q]++;
1168 spin_unlock_irqrestore(&wl->wl_lock, flags);
1170 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1171 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1172 wl1271_tx_work_locked(wl);
1175 * If the FW TX is busy, TX work will be scheduled by the threaded
1176 * interrupt handler function
1182 * The size of the dummy packet should be at least 1400 bytes. However, in
1183 * order to minimize the number of bus transactions, aligning it to 512 bytes
1184 * boundaries could be beneficial, performance wise
1186 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet: a zeroed QoS-less NULLFUNC data frame (ToDS)
 * padded out to TOTAL_TX_DUMMY_PACKET_SIZE, with room reserved for the
 * Tx HW descriptor, management TID and queue 0 mapping.
 */
1188 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1190 struct sk_buff *skb;
1191 struct ieee80211_hdr_3addr *hdr;
1192 unsigned int dummy_packet_size;
1194 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1195 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1197 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1199 wl1271_warning("Failed to allocate a dummy packet skb");
1203 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1205 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1206 memset(hdr, 0, sizeof(*hdr));
1207 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1208 IEEE80211_STYPE_NULLFUNC |
1209 IEEE80211_FCTL_TODS);
1211 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1213 /* Dummy packets require the TID to be management */
1214 skb->priority = WL1271_TID_MGMT;
1216 /* Initialize all fields that might be used */
1217 skb_set_queue_mapping(skb, 0);
1218 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a WoWLAN trigger pattern against firmware RX-filter limits:
 * it must have a mask, require no more than WL1271_RX_FILTER_MAX_FIELDS
 * segments, and the flattened fields must fit in the FW buffer.
 */
1226 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1228 int num_fields = 0, in_field = 0, fields_size = 0;
1229 int i, pattern_len = 0;
1232 wl1271_warning("No mask in WoWLAN pattern");
1237 * The pattern is broken up into segments of bytes at different offsets
1238 * that need to be checked by the FW filter. Each segment is called
1239 * a field in the FW API. We verify that the total number of fields
1240 * required for this pattern won't exceed FW limits (8)
1241 * as well as the total fields buffer won't exceed the FW limit.
1242 * Note that if there's a pattern which crosses Ethernet/IP header
1243 * boundary a new field is required.
1245 for (i = 0; i < p->pattern_len; i++) {
1246 if (test_bit(i, (unsigned long *)p->mask)) {
1251 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1253 fields_size += pattern_len +
1254 RX_FILTER_FIELD_OVERHEAD;
1262 fields_size += pattern_len +
1263 RX_FILTER_FIELD_OVERHEAD;
1270 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1274 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1275 wl1271_warning("RX Filter too complex. Too many segments");
1279 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1280 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter.  Freed with wl1271_rx_filter_free(). */
1287 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1289 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including each field's pattern buffer. */
1292 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1299 for (i = 0; i < filter->num_fields; i++)
1300 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset + flags + pattern of @len bytes) to an RX
 * filter, allocating a copy of the pattern.  Fails if the filter is
 * already at WL1271_RX_FILTER_MAX_FIELDS.
 */
1305 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1306 u16 offset, u8 flags,
1307 u8 *pattern, u8 len)
1309 struct wl12xx_rx_filter_field *field;
1311 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1312 wl1271_warning("Max fields per RX filter. can't alloc another");
1316 field = &filter->fields[filter->num_fields];
1318 field->pattern = kzalloc(len, GFP_KERNEL);
1319 if (!field->pattern) {
1320 wl1271_warning("Failed to allocate RX filter pattern");
1324 filter->num_fields++;
1326 field->offset = cpu_to_le16(offset);
1327 field->flags = flags;
1329 memcpy(field->pattern, pattern, len);
/*
 * Size in bytes of the filter's fields once flattened for the FW
 * (struct size minus the host pointer, plus the inline pattern length,
 * summed over all fields).
 */
1334 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1336 int i, fields_size = 0;
1338 for (i = 0; i < filter->num_fields; i++)
1339 fields_size += filter->fields[i].len +
1340 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into @buf in the firmware's wire
 * layout: the field header followed directly by the pattern bytes
 * (the host-side pattern pointer is replaced by the inline data).
 */
1346 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1350 struct wl12xx_rx_filter_field *field;
1352 for (i = 0; i < filter->num_fields; i++) {
1353 field = (struct wl12xx_rx_filter_field *)buf;
1355 field->offset = filter->fields[i].offset;
1356 field->flags = filter->fields[i].flags;
1357 field->len = filter->fields[i].len;
1359 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1360 buf += sizeof(struct wl12xx_rx_filter_field) -
1361 sizeof(u8 *) + field->len;
1366 * Allocates an RX filter and returns it through @f;
1367 * the caller must free it using wl1271_rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN packet pattern into a wl12xx RX filter.
 * The pattern mask is scanned bit by bit and each contiguous run of
 * masked bytes becomes one filter field.  Bytes inside the Ethernet
 * header get header-relative offsets; later bytes are IP-header
 * relative, and a run straddling that boundary is split in two.
 */
1369 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1370 struct cfg80211_wowlan_trig_pkt_pattern *p,
1371 struct wl12xx_rx_filter **f)
1374 struct wl12xx_rx_filter *filter;
1378 filter = wl1271_rx_filter_alloc();
1380 wl1271_warning("Failed to alloc rx filter");
/* i = start of the current run of masked bytes */
1386 while (i < p->pattern_len) {
1387 if (!test_bit(i, (unsigned long *)p->mask)) {
/* j = first unmasked byte after the run */
1392 for (j = i; j < p->pattern_len; j++) {
1393 if (!test_bit(j, (unsigned long *)p->mask))
/* clamp a run that crosses the eth-header / IP-header boundary */
1396 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1397 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1401 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1403 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1405 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1406 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1411 ret = wl1271_rx_filter_alloc_field(filter,
1414 &p->pattern[i], len);
/* a matching packet should wake the host */
1421 filter->action = FILTER_SIGNAL;
/* error path: release the partially-built filter */
1427 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the WoWLAN configuration.
 * Without usable patterns (or with wow->any) all filters are cleared
 * and the default RX action stays FILTER_SIGNAL.  Otherwise every
 * pattern is validated before touching FW state, then each one is
 * translated into a filter and enabled, and the default action becomes
 * FILTER_DROP so only matching packets wake the host.
 */
1433 static int wl1271_configure_wowlan(struct wl1271 *wl,
1434 struct cfg80211_wowlan *wow)
1438 if (!wow || wow->any || !wow->n_patterns) {
1439 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1440 wl1271_rx_filter_clear_all(wl);
1444 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1447 /* Validate all incoming patterns before clearing current FW state */
1448 for (i = 0; i < wow->n_patterns; i++) {
1449 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1451 wl1271_warning("Bad wowlan pattern %d", i);
/* start from a clean FW filter state */
1456 wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1457 wl1271_rx_filter_clear_all(wl);
1459 /* Translate WoWLAN patterns into filters */
1460 for (i = 0; i < wow->n_patterns; i++) {
1461 struct cfg80211_wowlan_trig_pkt_pattern *p;
1462 struct wl12xx_rx_filter *filter = NULL;
1464 p = &wow->patterns[i];
1466 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1468 wl1271_warning("Failed to create an RX filter from "
1469 "wowlan pattern %d", i);
/* filter is copied to FW; the local copy can be freed either way */
1473 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1475 wl1271_rx_filter_free(filter);
/* from here on, drop everything that does not match a filter */
1480 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: arm the WoWLAN RX filters
 * and switch the FW wake-up conditions to the suspend event/listen
 * interval.  Vifs that are not associated are skipped.
 */
1486 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1487 struct wl12xx_vif *wlvif,
1488 struct cfg80211_wowlan *wow)
1492 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* chip must be awake (out of ELP) before issuing commands */
1495 ret = wl1271_ps_elp_wakeup(wl);
1499 wl1271_configure_wowlan(wl, wow);
1500 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1501 wl->conf.conn.suspend_wake_up_event,
1502 wl->conf.conn.suspend_listen_interval);
1505 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1507 wl1271_ps_elp_sleep(wl);
/*
 * Suspend-time configuration for an AP vif: enable FW beacon filtering
 * so beacons do not wake the host.  Vifs whose AP is not started are
 * skipped.
 */
1514 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1515 struct wl12xx_vif *wlvif)
1519 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1522 ret = wl1271_ps_elp_wakeup(wl);
1526 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1528 wl1271_ps_elp_sleep(wl);
/*
 * Dispatch the per-vif suspend configuration by BSS type
 * (STA vs AP); other BSS types need no suspend handling here.
 */
1534 static int wl1271_configure_suspend(struct wl1271 *wl,
1535 struct wl12xx_vif *wlvif,
1536 struct cfg80211_wowlan *wow)
1538 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1539 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1540 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1541 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo wl1271_configure_suspend(): for STA vifs, clear the WoWLAN
 * filters and restore the normal wake-up event/listen interval; for AP
 * vifs, disable beacon filtering again.
 */
1545 static void wl1271_configure_resume(struct wl1271 *wl,
1546 struct wl12xx_vif *wlvif)
1549 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1550 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* nothing to restore for other BSS types */
1552 if ((!is_ap) && (!is_sta))
1555 ret = wl1271_ps_elp_wakeup(wl);
/* NULL wow config disables all RX filters */
1560 wl1271_configure_wowlan(wl, NULL);
1562 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1563 wl->conf.conn.wake_up_event,
1564 wl->conf.conn.listen_interval);
1567 wl1271_error("resume: wake up conditions failed: %d",
1571 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1574 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend callback.  Flushes TX, configures every vif for
 * suspend under the mutex, then disables and re-enables interrupts so
 * that any in-flight IRQ work is flushed while WL1271_FLAG_SUSPENDED is
 * set (preventing new threaded-irq work from being queued).
 */
1577 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1578 struct cfg80211_wowlan *wow)
1580 struct wl1271 *wl = hw->priv;
1581 struct wl12xx_vif *wlvif;
1584 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1587 wl1271_tx_flush(wl);
1589 mutex_lock(&wl->mutex);
1590 wl->wow_enabled = true;
1591 wl12xx_for_each_wlvif(wl, wlvif) {
1592 ret = wl1271_configure_suspend(wl, wlvif, wow);
1594 mutex_unlock(&wl->mutex);
1595 wl1271_warning("couldn't prepare device to suspend");
1599 mutex_unlock(&wl->mutex);
1600 /* flush any remaining work */
1601 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1604 * disable and re-enable interrupts in order to flush
1607 wlcore_disable_interrupts(wl);
1610 * set suspended flag to avoid triggering a new threaded_irq
1611 * work. no need for spinlock as interrupts are disabled.
1613 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1615 wlcore_enable_interrupts(wl);
1616 flush_work(&wl->tx_work);
1617 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 resume callback.  Clears the suspended flag under the
 * spinlock so threaded-irq work may be queued again, replays an IRQ
 * that was postponed while suspended, then restores each vif's normal
 * (non-WoWLAN) configuration under the mutex.
 */
1623 static int wl1271_op_resume(struct ieee80211_hw *hw)
1624 struct wl1271 *wl = hw->priv;
1625 struct wl12xx_vif *wlvif;
1626 unsigned long flags;
1627 bool run_irq_work = false;
1629 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1631 WARN_ON(!wl->wow_enabled);
1634 * re-enable irq_work enqueuing, and call irq_work directly if
1635 * there is a pending work.
1637 spin_lock_irqsave(&wl->wl_lock, flags);
1638 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1639 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1640 run_irq_work = true;
1641 spin_unlock_irqrestore(&wl->wl_lock, flags);
1644 wl1271_debug(DEBUG_MAC80211,
1645 "run postponed irq_work directly");
1647 wlcore_enable_interrupts(wl);
1650 mutex_lock(&wl->mutex);
1651 wl12xx_for_each_wlvif(wl, wlvif) {
1652 wl1271_configure_resume(wl, wlvif);
1654 wl->wow_enabled = false;
1655 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback.  Intentionally does nothing: the firmware
 * boot is deferred to add_interface because the MAC address is only
 * known once an interface is added (see the comment below).
 */
1661 static int wl1271_op_start(struct ieee80211_hw *hw)
1663 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1666 * We have to delay the booting of the hardware because
1667 * we need to know the local MAC address before downloading and
1668 * initializing the firmware. The MAC address cannot be changed
1669 * after boot, and without the proper MAC address, the firmware
1670 * will not function properly.
1672 * The MAC address is first known when the corresponding interface
1673 * is added. That is where we will initialize the hardware.
/*
 * mac80211 stop callback.  Disables interrupts, moves the driver to
 * WL1271_STATE_OFF, cancels/flushes all deferred work, notifies
 * mac80211 about pending TX frames, powers the chip off and resets all
 * per-device state back to boot defaults.
 */
1679 static void wl1271_op_stop(struct ieee80211_hw *hw)
1681 struct wl1271 *wl = hw->priv;
1684 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1687 * Interrupts must be disabled before setting the state to OFF.
1688 * Otherwise, the interrupt handler might be called and exit without
1689 * reading the interrupt status.
1691 wlcore_disable_interrupts(wl);
1692 mutex_lock(&wl->mutex);
/* already off (e.g. stop after recovery): just rebalance interrupts */
1693 if (wl->state == WL1271_STATE_OFF) {
1694 mutex_unlock(&wl->mutex);
1697 * This will not necessarily enable interrupts as interrupts
1698 * may have been disabled when op_stop was called. It will,
1699 * however, balance the above call to disable_interrupts().
1701 wlcore_enable_interrupts(wl);
1706 * this must be before the cancel_work calls below, so that the work
1707 * functions don't perform further work.
1709 wl->state = WL1271_STATE_OFF;
1710 mutex_unlock(&wl->mutex);
/* stop every outstanding work item before touching shared state */
1712 wl1271_flush_deferred_work(wl);
1713 cancel_delayed_work_sync(&wl->scan_complete_work);
1714 cancel_work_sync(&wl->netstack_work);
1715 cancel_work_sync(&wl->tx_work);
1716 cancel_delayed_work_sync(&wl->elp_work);
1717 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1718 cancel_delayed_work_sync(&wl->connection_loss_work);
1720 /* let's notify MAC80211 about the remaining pending TX frames */
1721 wl12xx_tx_reset(wl);
1722 mutex_lock(&wl->mutex);
1724 wl1271_power_off(wl);
/* reset device-global state to boot defaults */
1726 wl->band = IEEE80211_BAND_2GHZ;
1729 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1730 wl->channel_type = NL80211_CHAN_NO_HT;
1731 wl->tx_blocks_available = 0;
1732 wl->tx_allocated_blocks = 0;
1733 wl->tx_results_count = 0;
1734 wl->tx_packets_count = 0;
1735 wl->time_offset = 0;
1736 wl->ap_fw_ps_map = 0;
1738 wl->sched_scanning = false;
1739 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1740 memset(wl->links_map, 0, sizeof(wl->links_map));
1741 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1742 wl->active_sta_count = 0;
1744 /* The system link is always allocated */
1745 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1748 * this is performed after the cancel_work calls and the associated
1749 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1750 * get executed before all these vars have been reset.
1754 wl->tx_blocks_freed = 0;
1756 for (i = 0; i < NUM_TX_QUEUES; i++) {
1757 wl->tx_pkts_freed[i] = 0;
1758 wl->tx_allocated_pkts[i] = 0;
1761 wl1271_debugfs_reset(wl);
/* fw_status_2 points into the fw_status_1 allocation; only one kfree */
1763 kfree(wl->fw_status_1);
1764 wl->fw_status_1 = NULL;
1765 wl->fw_status_2 = NULL;
1766 kfree(wl->tx_res_if);
1767 wl->tx_res_if = NULL;
1768 kfree(wl->target_mem_map);
1769 wl->target_mem_map = NULL;
1771 mutex_unlock(&wl->mutex);
1774 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1776 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1777 WL12XX_MAX_RATE_POLICIES);
1778 if (policy >= WL12XX_MAX_RATE_POLICIES)
1781 __set_bit(policy, wl->rate_policies_map);
1786 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1788 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1791 __clear_bit(*idx, wl->rate_policies_map);
1792 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Map the vif's BSS type to a FW role id.  The AP and STA cases each
 * have a P2P variant (GO/CL); NOTE(review): the conditions selecting
 * the P2P branches are not visible in this extract -- presumably they
 * test the vif's p2p flag.  Unknown BSS types yield
 * WL12XX_INVALID_ROLE_TYPE.
 */
1795 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1797 switch (wlvif->bss_type) {
1798 case BSS_TYPE_AP_BSS:
1800 return WL1271_ROLE_P2P_GO;
1802 return WL1271_ROLE_AP;
1804 case BSS_TYPE_STA_BSS:
1806 return WL1271_ROLE_P2P_CL;
1808 return WL1271_ROLE_STA;
1811 return WL1271_ROLE_IBSS;
1814 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1816 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the driver-private per-vif data for a newly added
 * interface: derive the BSS type from the p2p-aware mac80211 vif type,
 * invalidate role/link ids, allocate the rate policies appropriate for
 * STA/IBSS vs AP operation, inherit the device-global band/channel/
 * power settings and set up the rx-streaming works and timer.
 */
1819 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1821 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1824 /* clear everything but the persistent data */
1825 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1827 switch (ieee80211_vif_type_p2p(vif)) {
1828 case NL80211_IFTYPE_P2P_CLIENT:
1831 case NL80211_IFTYPE_STATION:
1832 wlvif->bss_type = BSS_TYPE_STA_BSS;
1834 case NL80211_IFTYPE_ADHOC:
1835 wlvif->bss_type = BSS_TYPE_IBSS;
1837 case NL80211_IFTYPE_P2P_GO:
1840 case NL80211_IFTYPE_AP:
1841 wlvif->bss_type = BSS_TYPE_AP_BSS;
1844 wlvif->bss_type = MAX_BSS_TYPE;
/* no FW role/link assigned yet */
1848 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
1849 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
1850 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
1852 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
1853 wlvif->bss_type == BSS_TYPE_IBSS) {
1854 /* init sta/ibss data */
1855 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
1856 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1857 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1858 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1859 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1860 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1861 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links plus per-AC unicast policies */
1864 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
1865 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
1866 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
1867 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
1868 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1869 wl12xx_allocate_rate_policy(wl,
1870 &wlvif->ap.ucast_rate_idx[i]);
1871 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
1873 * TODO: check if basic_rate shouldn't be
1874 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
1875 * instead (the same thing for STA above).
1877 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
1878 /* TODO: this seems to be used only for STA, check it */
1879 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
1882 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1883 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1884 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1887 * mac80211 configures some values globally, while we treat them
1888 * per-interface. thus, on init, we have to copy them from wl
1890 wlvif->band = wl->band;
1891 wlvif->channel = wl->channel;
1892 wlvif->power_level = wl->power_level;
1893 wlvif->channel_type = wl->channel_type;
1895 INIT_WORK(&wlvif->rx_streaming_enable_work,
1896 wl1271_rx_streaming_enable_work);
1897 INIT_WORK(&wlvif->rx_streaming_disable_work,
1898 wl1271_rx_streaming_disable_work);
1899 INIT_LIST_HEAD(&wlvif->list);
1901 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
1902 (unsigned long) wlvif);
/*
 * Power up the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times.  A failed attempt flushes IRQs/deferred
 * work (see the in-code comment about the mutex) and powers the chip
 * off before retrying.  On success, publishes the hw/fw versions to the
 * wiphy, disables 5 GHz channels when 11a is unsupported (per NVS) and
 * moves the driver state to ON.  Returns whether the FW booted.
 */
1906 static bool wl12xx_init_fw(struct wl1271 *wl)
1908 int retries = WL1271_BOOT_RETRIES;
1909 bool booted = false;
1910 struct wiphy *wiphy = wl->hw->wiphy;
1915 ret = wl12xx_chip_wakeup(wl, false);
1919 ret = wl->ops->boot(wl);
1923 ret = wl1271_hw_init(wl);
/* failure path: quiesce IRQs before powering off for the retry */
1931 mutex_unlock(&wl->mutex);
1932 /* Unlocking the mutex in the middle of handling is
1933 inherently unsafe. In this case we deem it safe to do,
1934 because we need to let any possibly pending IRQ out of
1935 the system (and while we are WL1271_STATE_OFF the IRQ
1936 work function will not do anything.) Also, any other
1937 possible concurrent operations will fail due to the
1938 current state, hence the wl1271 struct should be safe. */
1939 wlcore_disable_interrupts(wl);
1940 wl1271_flush_deferred_work(wl);
1941 cancel_work_sync(&wl->netstack_work);
1942 mutex_lock(&wl->mutex);
1944 wl1271_power_off(wl);
1948 wl1271_error("firmware boot failed despite %d retries",
1949 WL1271_BOOT_RETRIES);
1953 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1955 /* update hw/fw version info in wiphy struct */
1956 wiphy->hw_version = wl->chip.id;
1957 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1958 sizeof(wiphy->fw_version));
1961 * Now we know if 11a is supported (info from the NVS), so disable
1962 * 11a channels if not supported
1964 if (!wl->enable_11a)
1965 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
1967 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1968 wl->enable_11a ? "" : "not ");
1970 wl->state = WL1271_STATE_ON;
1975 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
1977 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
1981 * Check whether a fw switch (i.e. moving from one loaded
1982 * fw to another) is needed. This function is also responsible
1983 * for updating wl->last_vif_count, so it must be called before
1984 * loading a non-plt fw (so that the correct single-role/multi-role fw is chosen)
1987 static bool wl12xx_need_fw_change(struct wl1271 *wl,
1988 struct vif_counter_data vif_counter_data,
1991 enum wl12xx_fw_type current_fw = wl->fw_type;
1992 u8 vif_count = vif_counter_data.counter;
/* never switch fw in the middle of a change_interface sequence */
1994 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
1997 /* increase the vif count if this is a new vif */
1998 if (add && !vif_counter_data.cur_vif_running)
/* side effect: remember the count for the next fw load */
2001 wl->last_vif_count = vif_count;
2003 /* no need for fw change if the device is OFF */
2004 if (wl->state == WL1271_STATE_OFF)
/* >1 vif needs the multi-role fw; <=1 vif the single-role fw */
2007 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2009 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2016 * Enter "forced psm". Make sure the sta is in psm against the ap,
2017 * to make the fw switch a bit more disconnection-persistent.
2019 static void wl12xx_force_active_psm(struct wl1271 *wl)
2021 struct wl12xx_vif *wlvif;
2023 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2024 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * mac80211 add_interface callback.  Initializes per-vif state, triggers
 * a deliberate recovery when a different fw (single/multi role) is
 * needed, boots the firmware on first use (the MAC address is only
 * known here -- see wl1271_op_start), enables the FW role(s) -- STA and
 * IBSS also get a device role for pre-association traffic -- and runs
 * the vif-specific init before registering the vif in wlvif_list.
 */
2028 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2029 struct ieee80211_vif *vif)
2031 struct wl1271 *wl = hw->priv;
2032 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2033 struct vif_counter_data vif_count;
2036 bool booted = false;
2038 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2039 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2041 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2042 ieee80211_vif_type_p2p(vif), vif->addr);
2044 wl12xx_get_vif_count(hw, vif, &vif_count);
2046 mutex_lock(&wl->mutex);
2047 ret = wl1271_ps_elp_wakeup(wl);
2052 * in some very corner case HW recovery scenarios its possible to
2053 * get here before __wl1271_op_remove_interface is complete, so
2054 * opt out if that is the case.
2056 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2057 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2063 ret = wl12xx_init_vif_data(wl, vif);
2068 role_type = wl12xx_get_role_type(wl, wlvif);
2069 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
/* switching fw binaries is implemented as an intended recovery */
2074 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2075 wl12xx_force_active_psm(wl);
2076 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2077 mutex_unlock(&wl->mutex);
2078 wl1271_recovery_work(&wl->recovery_work);
2083 * TODO: after the nvs issue will be solved, move this block
2084 * to start(), and make sure here the driver is ON.
2086 if (wl->state == WL1271_STATE_OFF) {
2088 * we still need this in order to configure the fw
2089 * while uploading the nvs
2091 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2093 booted = wl12xx_init_fw(wl);
2100 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2101 wlvif->bss_type == BSS_TYPE_IBSS) {
2103 * The device role is a special role used for
2104 * rx and tx frames prior to association (as
2105 * the STA role can get packets only from
2106 * its associated bssid)
2108 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2110 &wlvif->dev_role_id);
2115 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2116 role_type, &wlvif->role_id);
2120 ret = wl1271_init_vif_specific(wl, vif);
/* vif is now live: publish it and mark it initialized */
2124 list_add(&wlvif->list, &wl->wlvif_list);
2125 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2127 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2132 wl1271_ps_elp_sleep(wl);
2134 mutex_unlock(&wl->mutex);
/*
 * Tear down one vif.  Aborts any scan the vif owns, disables its FW
 * roles (skipped during recovery, when the FW is gone anyway), frees
 * its rate policies and recorded AP keys, resets its TX state and --
 * with wl->mutex briefly dropped -- kills its rx-streaming timer and
 * works.  Caller holds wl->mutex (it is re-taken before returning).
 */
2139 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2140 struct ieee80211_vif *vif,
2141 bool reset_tx_queues)
2143 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2146 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2148 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2151 /* because of hardware recovery, we may get here twice */
2152 if (wl->state != WL1271_STATE_ON)
2155 wl1271_info("down");
2157 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2158 wl->scan_vif == vif) {
2160 * Rearm the tx watchdog just before idling scan. This
2161 * prevents just-finished scans from triggering the watchdog
2163 wl12xx_rearm_tx_watchdog_locked(wl);
/* abort the scan owned by this vif and tell mac80211 */
2165 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2166 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2167 wl->scan_vif = NULL;
2168 wl->scan.req = NULL;
2169 ieee80211_scan_completed(wl->hw, true);
2172 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2173 /* disable active roles */
2174 ret = wl1271_ps_elp_wakeup(wl);
2178 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2179 wlvif->bss_type == BSS_TYPE_IBSS) {
2180 if (wl12xx_dev_role_started(wlvif))
2181 wl12xx_stop_dev(wl, wlvif);
2183 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2188 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2192 wl1271_ps_elp_sleep(wl);
2195 /* clear all hlids (except system_hlid) */
2196 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2198 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2199 wlvif->bss_type == BSS_TYPE_IBSS) {
2200 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2201 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2202 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2203 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2205 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2206 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2207 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2208 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2209 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2210 wl12xx_free_rate_policy(wl,
2211 &wlvif->ap.ucast_rate_idx[i]);
2212 wl1271_free_ap_keys(wl, wlvif);
2215 dev_kfree_skb(wlvif->probereq);
2216 wlvif->probereq = NULL;
2217 wl12xx_tx_reset_wlvif(wl, wlvif);
2218 if (wl->last_wlvif == wlvif)
2219 wl->last_wlvif = NULL;
2220 list_del(&wlvif->list);
2221 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2222 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2223 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2225 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
/* drop the mutex so the rx-streaming work can be cancelled safely */
2230 mutex_unlock(&wl->mutex);
2232 del_timer_sync(&wlvif->rx_streaming_timer);
2233 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2234 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2236 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback.  Verifies the vif is still
 * tracked (it may already be gone if a HW recovery raced with the
 * shutdown), removes it via __wl1271_op_remove_interface(), and either
 * kicks a recovery when the remaining vif count needs a different fw,
 * or cancels any pending recovery work.
 */
2239 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2240 struct ieee80211_vif *vif)
2242 struct wl1271 *wl = hw->priv;
2243 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2244 struct wl12xx_vif *iter;
2245 struct vif_counter_data vif_count;
2246 bool cancel_recovery = true;
2248 wl12xx_get_vif_count(hw, vif, &vif_count);
2249 mutex_lock(&wl->mutex);
2251 if (wl->state == WL1271_STATE_OFF ||
2252 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2256 * wl->vif can be null here if someone shuts down the interface
2257 * just when hardware recovery has been started.
2259 wl12xx_for_each_wlvif(wl, iter) {
2263 __wl1271_op_remove_interface(wl, vif, true);
2266 WARN_ON(iter != wlvif);
2267 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2268 wl12xx_force_active_psm(wl);
2269 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2270 wl12xx_queue_recovery_work(wl);
2271 cancel_recovery = false;
2274 mutex_unlock(&wl->mutex);
2275 if (cancel_recovery)
2276 cancel_work_sync(&wl->recovery_work);
/*
 * mac80211 change_interface callback, implemented as remove + retype +
 * add.  WL1271_FLAG_VIF_CHANGE_IN_PROGRESS is held around the sequence
 * so the intermediate vif-count changes do not trigger a fw switch
 * (see wl12xx_need_fw_change()).
 */
2279 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2280 struct ieee80211_vif *vif,
2281 enum nl80211_iftype new_type, bool p2p)
2283 struct wl1271 *wl = hw->priv;
2286 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2287 wl1271_op_remove_interface(hw, vif);
2289 vif->type = new_type;
2291 ret = wl1271_op_add_interface(hw, vif);
2293 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Start the STA or IBSS role (a JOIN in FW terms) and then re-arm the
 * keep-alive machinery, which the JOIN shuts down (see the in-code
 * comment; the command order matters).  The recorded encryption type is
 * cleared because the JOIN wipes the WPA/WPA2 keys from the chip.
 */
2297 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2301 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2304 * One of the side effects of the JOIN command is that is clears
2305 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2306 * to a WPA/WPA2 access point will therefore kill the data-path.
2307 * Currently the only valid scenario for JOIN during association
2308 * is on roaming, in which case we will also be given new keys.
2309 * Keep the below message for now, unless it starts bothering
2310 * users who really like to roam a lot :)
2312 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2313 wl1271_info("JOIN while associated.");
2315 /* clear encryption type */
2316 wlvif->encryption_type = KEY_NONE;
2319 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2322 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2324 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/* keep-alive re-arm only applies once associated */
2328 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2332 * The join command disable the keep-alive mode, shut down its process,
2333 * and also clear the template config, so we need to reset it all after
2334 * the join. The acx_aid starts the keep-alive process, and the order
2335 * of the commands below is relevant.
2337 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2341 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2345 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2349 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2350 CMD_TEMPL_KLV_IDX_NULL_DATA,
2351 ACX_KEEP_ALIVE_TPL_VALID);
/*
 * Stop the STA role (disconnect).  Any channel switch in progress is
 * aborted and reported to mac80211 first; the TX security counters are
 * reset afterwards so a future association starts clean.
 */
2359 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2363 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2364 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2366 wl12xx_cmd_stop_channel_switch(wl);
2367 ieee80211_chswitch_done(vif, false);
2370 /* to stop listening to a channel, we disconnect */
2371 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2375 /* reset TX security counters on a clean disconnect */
2376 wlvif->tx_security_last_seq_lsb = 0;
2377 wlvif->tx_security_seq = 0;
2383 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2385 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2386 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Follow a mac80211 idle transition for a STA vif.  Going idle: stop
 * the device role if it was started, drop to the minimum basic rate,
 * invalidate the keep-alive template and clear WLVIF_FLAG_IN_USE.
 * Leaving idle: stop a running sched-scan (the FW supports it only
 * while idle) and start the device role again.  No-op when the idle
 * state is unchanged.
 */
2389 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2393 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2395 if (idle == cur_idle)
2399 /* no need to croc if we weren't busy (e.g. during boot) */
2400 if (wl12xx_dev_role_started(wlvif)) {
2401 ret = wl12xx_stop_dev(wl, wlvif);
2406 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2407 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2410 ret = wl1271_acx_keep_alive_config(
2411 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2412 ACX_KEEP_ALIVE_TPL_INVALID);
2415 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2417 /* The current firmware only supports sched_scan in idle */
2418 if (wl->sched_scanning) {
2419 wl1271_scan_sched_scan_stop(wl);
2420 ieee80211_sched_scan_stopped(wl->hw);
2423 ret = wl12xx_start_dev(wl, wlvif);
2426 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Apply a mac80211 config change to one vif: retune band/channel/HT
 * type (refreshing rate policies, and for an unassociated STA moving
 * the ROC channel via stop/start of the device role), follow PS
 * transitions (forced vs auto power save), and update the TX power
 * level.  Called from wl1271_op_config() with the chip awake.
 */
2433 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2434 struct ieee80211_conf *conf, u32 changed)
2436 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2439 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2441 /* if the channel changes while joined, join again */
2442 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2443 ((wlvif->band != conf->channel->band) ||
2444 (wlvif->channel != channel) ||
2445 (wlvif->channel_type != conf->channel_type))) {
2446 /* send all pending packets */
2447 wl1271_tx_work_locked(wl);
2448 wlvif->band = conf->channel->band;
2449 wlvif->channel = channel;
2450 wlvif->channel_type = conf->channel_type;
/* AP: rebuild the rate policies for the new band */
2453 ret = wl1271_init_ap_rates(wl, wlvif);
2455 wl1271_error("AP rate policy change failed %d",
2459 * FIXME: the mac80211 should really provide a fixed
2460 * rate to use here. for now, just use the smallest
2461 * possible rate for the band as a fixed rate for
2462 * association frames and other control messages.
2464 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2465 wl1271_set_band_rate(wl, wlvif);
2468 wl1271_tx_min_rate_get(wl,
2469 wlvif->basic_rate_set);
2470 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2472 wl1271_warning("rate policy for channel "
2476 * change the ROC channel. do it only if we are
2477 * not idle. otherwise, CROC will be called
2480 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2482 wl12xx_dev_role_started(wlvif) &&
2483 !(conf->flags & IEEE80211_CONF_IDLE)) {
2484 ret = wl12xx_stop_dev(wl, wlvif);
2488 ret = wl12xx_start_dev(wl, wlvif);
2495 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
/* entering PS: only meaningful once associated and not already in PS */
2497 if ((conf->flags & IEEE80211_CONF_PS) &&
2498 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2499 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2504 if (wl->conf.conn.forced_ps) {
2505 ps_mode = STATION_POWER_SAVE_MODE;
2506 ps_mode_str = "forced";
2508 ps_mode = STATION_AUTO_PS_MODE;
2509 ps_mode_str = "auto";
2512 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2514 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2517 wl1271_warning("enter %s ps failed %d",
2520 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2521 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2523 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2525 ret = wl1271_ps_set_mode(wl, wlvif,
2526 STATION_ACTIVE_MODE);
2528 wl1271_warning("exit auto ps failed %d", ret);
2532 if (conf->power_level != wlvif->power_level) {
2533 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2537 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback.  Channel/band/HT type and power level are
 * cached even while the chip is off; once the chip is on, the work is
 * delegated per vif to wl12xx_config_vif() after ELP wakeup.  TX is
 * flushed first on a channel change or an idle transition so queued
 * frames (e.g. a deauth) still reach the air.
 */
2543 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2545 struct wl1271 *wl = hw->priv;
2546 struct wl12xx_vif *wlvif;
2547 struct ieee80211_conf *conf = &hw->conf;
2548 int channel, ret = 0;
2550 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2552 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2555 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2557 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2561 * mac80211 will go to idle nearly immediately after transmitting some
2562 * frames, such as the deauth. To make sure those frames reach the air,
2563 * wait here until the TX queue is fully flushed.
2565 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2566 ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2567 (conf->flags & IEEE80211_CONF_IDLE)))
2568 wl1271_tx_flush(wl);
2570 mutex_lock(&wl->mutex);
2572 /* we support configuring the channel and band even while off */
2573 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2574 wl->band = conf->channel->band;
2575 wl->channel = channel;
2576 wl->channel_type = conf->channel_type;
2579 if (changed & IEEE80211_CONF_CHANGE_POWER)
2580 wl->power_level = conf->power_level;
2582 if (unlikely(wl->state == WL1271_STATE_OFF))
2585 ret = wl1271_ps_elp_wakeup(wl);
2589 /* configure each interface */
2590 wl12xx_for_each_wlvif(wl, wlvif) {
2591 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2597 wl1271_ps_elp_sleep(wl);
2600 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built by wl1271_op_prepare_multicast() and
 * consumed by wl1271_op_configure_filter().
 * NOTE(review): additional members (the enable flag and list length
 * referenced by the code using this struct) are not visible in this
 * extract.
 */
2605 struct wl1271_filter_params {
/* copied multicast addresses, up to ACX_MC_ADDRESS_GROUP_MAX entries */
2608 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback (atomic context, hence
 * GFP_ATOMIC).  Snapshots the MC address list into a freshly allocated
 * wl1271_filter_params; when the list exceeds ACX_MC_ADDRESS_GROUP_MAX
 * the HW MC filter is disabled instead.  The struct pointer is returned
 * as the u64 multicast cookie handed to configure_filter.
 */
2611 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2612 struct netdev_hw_addr_list *mc_list)
2614 struct wl1271_filter_params *fp;
2615 struct netdev_hw_addr *ha;
2616 struct wl1271 *wl = hw->priv;
2618 if (unlikely(wl->state == WL1271_STATE_OFF))
2621 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2623 wl1271_error("Out of memory setting filters.");
2627 /* update multicast filtering parameters */
2628 fp->mc_list_length = 0;
2629 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2630 fp->enabled = false;
2633 netdev_hw_addr_list_for_each(ha, mc_list) {
2634 memcpy(fp->mc_list[fp->mc_list_length],
2635 ha->addr, ETH_ALEN);
2636 fp->mc_list_length++;
2640 return (u64)(unsigned long)fp;
2643 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2646 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback.  Programs the FW multicast
 * address table for each non-AP vif from the params prepared by
 * prepare_multicast (FIF_ALLMULTI disables the table).  The remaining
 * filter flags cannot be programmed directly -- see the in-code comment
 * about filters being derived from active roles/ROC state.
 */
2650 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2651 unsigned int changed,
2652 unsigned int *total, u64 multicast)
2654 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2655 struct wl1271 *wl = hw->priv;
2656 struct wl12xx_vif *wlvif;
2660 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2661 " total %x", changed, *total);
2663 mutex_lock(&wl->mutex);
/* mask out everything the HW cannot support */
2665 *total &= WL1271_SUPPORTED_FILTERS;
2666 changed &= WL1271_SUPPORTED_FILTERS;
2668 if (unlikely(wl->state == WL1271_STATE_OFF))
2671 ret = wl1271_ps_elp_wakeup(wl);
2675 wl12xx_for_each_wlvif(wl, wlvif) {
2676 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2677 if (*total & FIF_ALLMULTI)
2678 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2682 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2685 fp->mc_list_length);
2692 * the fw doesn't provide an api to configure the filters. instead,
2693 * the filters configuration is based on the active roles / ROC
2698 wl1271_ps_elp_sleep(wl);
2701 mutex_unlock(&wl->mutex);
/*
 * Record an AP key for deferred programming (keys set before the AP
 * role starts are replayed by wl1271_ap_init_hwenc()).  A kzalloc'd
 * copy is stored in the first free recorded_keys slot; oversized keys
 * and attempts to replace an already-recorded key id are rejected.
 */
2705 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2706 u8 id, u8 key_type, u8 key_size,
2707 const u8 *key, u8 hlid, u32 tx_seq_32,
2710 struct wl1271_ap_key *ap_key;
2713 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2715 if (key_size > MAX_KEY_SIZE)
2719 * Find next free entry in ap_keys. Also check we are not replacing
2722 for (i = 0; i < MAX_NUM_KEYS; i++) {
2723 if (wlvif->ap.recorded_keys[i] == NULL)
2726 if (wlvif->ap.recorded_keys[i]->id == id) {
2727 wl1271_warning("trying to record key replacement");
/* no free slot found */
2732 if (i == MAX_NUM_KEYS)
2735 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2740 ap_key->key_type = key_type;
2741 ap_key->key_size = key_size;
2742 memcpy(ap_key->key, key, key_size);
2743 ap_key->hlid = hlid;
2744 ap_key->tx_seq_32 = tx_seq_32;
2745 ap_key->tx_seq_16 = tx_seq_16;
/* ownership of ap_key passes to the vif; freed by wl1271_free_ap_keys */
2747 wlvif->ap.recorded_keys[i] = ap_key;
/*
 * Free all recorded (pre-AP-start) keys and clear the slots so stale
 * pointers are never replayed. kfree(NULL) is a no-op, so empty slots
 * are safe.
 */
2751 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2755 for (i = 0; i < MAX_NUM_KEYS; i++) {
2756 kfree(wlvif->ap.recorded_keys[i]);
2757 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay all keys recorded before AP start into the firmware, then set
 * the default WEP key (broadcast link) if any WEP key was added.
 * Recorded keys are always freed at the end (wl1271_free_ap_keys),
 * on success and — presumably via the elided error path — on failure too.
 */
2761 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2764 struct wl1271_ap_key *key;
2765 bool wep_key_added = false;
2767 for (i = 0; i < MAX_NUM_KEYS; i++) {
2769 if (wlvif->ap.recorded_keys[i] == NULL)
2772 key = wlvif->ap.recorded_keys[i];
/* keys recorded with no specific link go to the broadcast link */
2774 if (hlid == WL12XX_INVALID_LINK_ID)
2775 hlid = wlvif->ap.bcast_hlid;
2777 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2778 key->id, key->key_type,
2779 key->key_size, key->key,
2780 hlid, key->tx_seq_32,
2785 if (key->key_type == KEY_WEP)
2786 wep_key_added = true;
2789 if (wep_key_added) {
2790 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2791 wlvif->ap.bcast_hlid);
2797 wl1271_free_ap_keys(wl, wlvif);
/*
 * Common key-programming helper for both AP and STA roles.
 * AP: record the key if the AP is not started yet, otherwise program it
 * directly. STA: program a unicast/broadcast key, silently ignoring
 * removals the firmware handles itself (cleared on next CMD_JOIN).
 * NOTE(review): several branches/braces are elided in this excerpt.
 */
2801 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2802 u16 action, u8 id, u8 key_type,
2803 u8 key_size, const u8 *key, u32 tx_seq_32,
2804 u16 tx_seq_16, struct ieee80211_sta *sta)
2807 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2810 struct wl1271_station *wl_sta;
/* per-station key targets the station's link; otherwise broadcast */
2814 wl_sta = (struct wl1271_station *)sta->drv_priv;
2815 hlid = wl_sta->hlid;
2817 hlid = wlvif->ap.bcast_hlid;
2820 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2822 * We do not support removing keys after AP shutdown.
2823 * Pretend we do to make mac80211 happy.
2825 if (action != KEY_ADD_OR_REPLACE)
2828 ret = wl1271_record_ap_key(wl, wlvif, id,
2830 key, hlid, tx_seq_32,
2833 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2834 id, key_type, key_size,
2835 key, hlid, tx_seq_32,
2843 static const u8 bcast_addr[ETH_ALEN] = {
2844 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2847 addr = sta ? sta->addr : bcast_addr;
2849 if (is_zero_ether_addr(addr)) {
2850 /* We dont support TX only encryption */
2854 /* The wl1271 does not allow to remove unicast keys - they
2855 will be cleared automatically on next CMD_JOIN. Ignore the
2856 request silently, as we dont want the mac80211 to emit
2857 an error message. */
2858 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
2861 /* don't remove key if hlid was already deleted */
2862 if (action == KEY_REMOVE &&
2863 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
2866 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
2867 id, key_type, key_size,
2868 key, addr, tx_seq_32,
2873 /* the default WEP key needs to be configured at least once */
2874 if (key_type == KEY_WEP) {
2875 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * mac80211 set_key op: map the cipher suite to a firmware key type,
 * derive TKIP/CCMP/GEM TX sequence counters from wlvif->tx_security_seq,
 * then add/replace or remove the key via wl1271_set_key(). On a STA
 * unicast (or WEP) key-type change the ARP response template is rebuilt
 * so ARP offload keeps using the right encryption.
 */
2886 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2887 struct ieee80211_vif *vif,
2888 struct ieee80211_sta *sta,
2889 struct ieee80211_key_conf *key_conf)
2891 struct wl1271 *wl = hw->priv;
2892 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2898 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
2900 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
2901 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2902 key_conf->cipher, key_conf->keyidx,
2903 key_conf->keylen, key_conf->flags);
2904 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
2906 mutex_lock(&wl->mutex);
2908 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2913 ret = wl1271_ps_elp_wakeup(wl);
/* translate mac80211 cipher to firmware key type + seq counters */
2917 switch (key_conf->cipher) {
2918 case WLAN_CIPHER_SUITE_WEP40:
2919 case WLAN_CIPHER_SUITE_WEP104:
2922 key_conf->hw_key_idx = key_conf->keyidx;
2924 case WLAN_CIPHER_SUITE_TKIP:
2925 key_type = KEY_TKIP;
2927 key_conf->hw_key_idx = key_conf->keyidx;
2928 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2929 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2931 case WLAN_CIPHER_SUITE_CCMP:
/* firmware fills the IV; mac80211 just reserves space for it */
2934 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2935 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2936 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2938 case WL1271_CIPHER_SUITE_GEM:
2940 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2941 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2944 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
2952 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2953 key_conf->keyidx, key_type,
2954 key_conf->keylen, key_conf->key,
2955 tx_seq_32, tx_seq_16, sta);
2957 wl1271_error("Could not add or replace key");
2962 * reconfiguring arp response if the unicast (or common)
2963 * encryption key type was changed
2965 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
2966 (sta || key_type == KEY_WEP) &&
2967 wlvif->encryption_type != key_type) {
2968 wlvif->encryption_type = key_type;
2969 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
2971 wl1271_warning("build arp rsp failed: %d", ret);
2978 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
2979 key_conf->keyidx, key_type,
2980 key_conf->keylen, key_conf->key,
2983 wl1271_error("Could not remove key");
2989 wl1271_error("Unsupported key cmd 0x%x", cmd);
2995 wl1271_ps_elp_sleep(wl);
2998 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan op: start a one-shot hardware scan for the first
 * requested SSID. Refused while any role holds a ROC (remain-on-channel),
 * since scanning would conflict with it.
 */
3003 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3004 struct ieee80211_vif *vif,
3005 struct cfg80211_scan_request *req)
3007 struct wl1271 *wl = hw->priv;
3012 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first SSID from the request is used by this driver */
3015 ssid = req->ssids[0].ssid;
3016 len = req->ssids[0].ssid_len;
3019 mutex_lock(&wl->mutex);
3021 if (wl->state == WL1271_STATE_OFF) {
3023 * We cannot return -EBUSY here because cfg80211 will expect
3024 * a call to ieee80211_scan_completed if we do - in this case
3025 * there won't be any call.
3031 ret = wl1271_ps_elp_wakeup(wl);
3035 /* fail if there is any role in ROC */
3036 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3037 /* don't allow scanning right now */
3042 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3044 wl1271_ps_elp_sleep(wl);
3046 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan op: stop an in-progress scan, reset scan state
 * and report completion (aborted=true) to mac80211. The completion work
 * is cancelled outside the mutex to avoid deadlocking against it.
 */
3051 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3052 struct ieee80211_vif *vif)
3054 struct wl1271 *wl = hw->priv;
3057 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3059 mutex_lock(&wl->mutex);
3061 if (wl->state == WL1271_STATE_OFF)
3064 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3067 ret = wl1271_ps_elp_wakeup(wl);
3071 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3072 ret = wl1271_scan_stop(wl);
3078 * Rearm the tx watchdog just before idling scan. This
3079 * prevents just-finished scans from triggering the watchdog
3081 wl12xx_rearm_tx_watchdog_locked(wl);
3083 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3084 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3085 wl->scan_vif = NULL;
3086 wl->scan.req = NULL;
/* aborted=true: tell cfg80211 this scan did not finish normally */
3087 ieee80211_scan_completed(wl->hw, true);
3090 wl1271_ps_elp_sleep(wl);
3092 mutex_unlock(&wl->mutex);
/* must run unlocked: the work itself takes wl->mutex */
3094 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start op: configure then start a firmware-driven
 * periodic (scheduled) scan; marks wl->sched_scanning on success.
 */
3097 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3098 struct ieee80211_vif *vif,
3099 struct cfg80211_sched_scan_request *req,
3100 struct ieee80211_sched_scan_ies *ies)
3102 struct wl1271 *wl = hw->priv;
3103 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3106 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3108 mutex_lock(&wl->mutex);
3110 if (wl->state == WL1271_STATE_OFF) {
3115 ret = wl1271_ps_elp_wakeup(wl);
/* two-step: push scan parameters to fw, then kick it off */
3119 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3123 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3127 wl->sched_scanning = true;
3130 wl1271_ps_elp_sleep(wl);
3132 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop op: stop the firmware scheduled scan.
 * No-op if the chip is already off.
 */
3136 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3137 struct ieee80211_vif *vif)
3139 struct wl1271 *wl = hw->priv;
3142 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3144 mutex_lock(&wl->mutex);
3146 if (wl->state == WL1271_STATE_OFF)
3149 ret = wl1271_ps_elp_wakeup(wl);
3153 wl1271_scan_sched_scan_stop(wl);
3155 wl1271_ps_elp_sleep(wl);
3157 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold op: push the fragmentation threshold to
 * the firmware via ACX (global, not per-vif).
 */
3160 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3162 struct wl1271 *wl = hw->priv;
3165 mutex_lock(&wl->mutex);
3167 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3172 ret = wl1271_ps_elp_wakeup(wl);
3176 ret = wl1271_acx_frag_threshold(wl, value);
3178 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3180 wl1271_ps_elp_sleep(wl);
3183 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold op: apply the RTS threshold to every
 * active vif (the firmware setting is per-role).
 */
3188 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3190 struct wl1271 *wl = hw->priv;
3191 struct wl12xx_vif *wlvif;
3194 mutex_lock(&wl->mutex);
3196 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3201 ret = wl1271_ps_elp_wakeup(wl);
3205 wl12xx_for_each_wlvif(wl, wlvif) {
3206 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3208 wl1271_warning("set rts threshold failed: %d", ret);
3210 wl1271_ps_elp_sleep(wl);
3213 mutex_unlock(&wl->mutex);
/*
 * Extract the SSID IE from a management frame skb (IEs start at
 * 'offset') and cache it in wlvif->ssid / wlvif->ssid_len.
 * Errors out if the IE is missing or longer than IEEE80211_MAX_SSID_LEN.
 */
3218 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3221 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3223 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3227 wl1271_error("No SSID in IEs!");
3232 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3233 wl1271_error("SSID is too long!");
3237 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id byte + length byte) */
3238 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Remove the first occurrence of IE 'eid' from the skb (searching from
 * 'ieoffset') by shifting the tail down and trimming the skb length.
 */
3242 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3245 const u8 *next, *end = skb->data + skb->len;
3246 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3247 skb->len - ieoffset);
3252 memmove(ie, next, end - next);
3253 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but for a vendor-specific IE identified by
 * OUI + OUI type (e.g. the WFA P2P IE).
 */
3256 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3257 unsigned int oui, u8 oui_type,
3261 const u8 *next, *end = skb->data + skb->len;
3262 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3263 skb->data + ieoffset,
3264 skb->len - ieoffset);
3269 memmove(ie, next, end - next);
3270 skb_trim(skb, skb->len - len);
/*
 * Install the probe-response template supplied by usermode
 * (ieee80211_proberesp_get) into the firmware and mark it set, so the
 * beacon-derived fallback is skipped from then on.
 */
3273 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3274 struct ieee80211_vif *vif)
3276 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3277 struct sk_buff *skb;
3280 skb = ieee80211_proberesp_get(wl->hw, vif);
3284 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3285 CMD_TEMPL_AP_PROBE_RESPONSE,
3294 wl1271_debug(DEBUG_AP, "probe response updated");
3295 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template setter: if the cached SSID is empty
 * (hidden SSID case), rebuild the template on the stack by splicing the
 * SSID from bss_conf into the beacon-derived frame, then push it to fw.
 */
3301 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3302 struct ieee80211_vif *vif,
3304 size_t probe_rsp_len,
3307 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3308 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3309 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3310 int ssid_ie_offset, ie_offset, templ_len;
3313 /* no need to change probe response if the SSID is set correctly */
3314 if (wlvif->ssid_len > 0)
3315 return wl1271_cmd_template_set(wl, wlvif->role_id,
3316 CMD_TEMPL_AP_PROBE_RESPONSE,
/* worst case: frame grows by the full SSID length */
3321 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3322 wl1271_error("probe_rsp template too big");
3326 /* start searching from IE offset */
3327 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3329 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3330 probe_rsp_len - ie_offset);
3332 wl1271_error("No SSID in beacon!");
3336 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the (possibly zero-length) SSID IE in the source */
3337 ptr += (ptr[1] + 2);
3339 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3341 /* insert SSID from bss_conf */
3342 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3343 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3344 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3345 bss_conf->ssid, bss_conf->ssid_len);
3346 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* copy the remainder of the frame after the original SSID IE */
3348 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3349 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3350 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3352 return wl1271_cmd_template_set(wl, wlvif->role_id,
3353 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to the firmware. Only flags present in 'changed' are
 * processed. NOTE(review): preamble errors are not checked here,
 * matching the visible code — confirm against full source.
 */
3359 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3360 struct ieee80211_vif *vif,
3361 struct ieee80211_bss_conf *bss_conf,
3364 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3367 if (changed & BSS_CHANGED_ERP_SLOT) {
3368 if (bss_conf->use_short_slot)
3369 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3371 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3373 wl1271_warning("Set slot time failed %d", ret);
3378 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3379 if (bss_conf->use_short_preamble)
3380 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3382 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3385 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3386 if (bss_conf->use_cts_prot)
3387 ret = wl1271_acx_cts_protect(wl, wlvif,
3390 ret = wl1271_acx_cts_protect(wl, wlvif,
3391 CTSPROTECT_DISABLE);
3393 wl1271_warning("Set ctsprotect failed %d", ret);
3402 static int wlcore_set_beacon_template(struct wl1271 *wl,
3403 struct ieee80211_vif *vif,
3406 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3407 struct ieee80211_hdr *hdr;
3410 int ieoffset = offsetof(struct ieee80211_mgmt,
3412 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3420 wl1271_debug(DEBUG_MASTER, "beacon updated");
3422 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3424 dev_kfree_skb(beacon);
3427 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3428 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3430 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3435 dev_kfree_skb(beacon);
3440 * In case we already have a probe-resp beacon set explicitly
3441 * by usermode, don't use the beacon data.
3443 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3446 /* remove TIM ie from probe response */
3447 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3450 * remove p2p ie from probe response.
3451 * the fw reponds to probe requests that don't include
3452 * the p2p ie. probe requests with p2p ie will be passed,
3453 * and will be responded by the supplicant (the spec
3454 * forbids including the p2p ie when responding to probe
3455 * requests that didn't include it).
3457 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3458 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3460 hdr = (struct ieee80211_hdr *) beacon->data;
3461 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3462 IEEE80211_STYPE_PROBE_RESP);
3464 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3469 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3470 CMD_TEMPL_PROBE_RESPONSE,
3475 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache the beacon interval,
 * refresh the usermode probe-response template (AP only), and rebuild
 * the beacon template when the beacon itself changed.
 */
3483 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3484 struct ieee80211_vif *vif,
3485 struct ieee80211_bss_conf *bss_conf,
3488 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3489 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3492 if ((changed & BSS_CHANGED_BEACON_INT)) {
3493 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3494 bss_conf->beacon_int);
3496 wlvif->beacon_int = bss_conf->beacon_int;
3499 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3500 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3502 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3505 if ((changed & BSS_CHANGED_BEACON)) {
3506 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3513 wl1271_error("beacon info change failed: %d", ret);
3517 /* AP mode changes */
/*
 * Handle bss_conf changes for an AP vif: basic-rate policy + template
 * re-init, beacon info, AP start/stop on BEACON_ENABLED toggles
 * (replaying recorded keys via wl1271_ap_init_hwenc on start), ERP
 * settings, and HT operation mode.
 */
3518 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3519 struct ieee80211_vif *vif,
3520 struct ieee80211_bss_conf *bss_conf,
3523 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3526 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3527 u32 rates = bss_conf->basic_rates;
3529 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3531 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3532 wlvif->basic_rate_set);
/* rate policy changed -> rebuild rate-dependent templates */
3534 ret = wl1271_init_ap_rates(wl, wlvif);
3536 wl1271_error("AP rate policy change failed %d", ret);
3540 ret = wl1271_ap_init_templates(wl, vif);
3544 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3548 ret = wlcore_set_beacon_template(wl, vif, true);
3553 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3557 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3558 if (bss_conf->enable_beacon) {
3559 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3560 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* push keys that were recorded before the AP role started */
3564 ret = wl1271_ap_init_hwenc(wl, wlvif);
3568 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3569 wl1271_debug(DEBUG_AP, "started AP");
3572 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3573 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3577 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3578 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3580 wl1271_debug(DEBUG_AP, "stopped AP");
3585 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3589 /* Handle HT information change */
3590 if ((changed & BSS_CHANGED_HT) &&
3591 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3592 ret = wl1271_acx_set_ht_information(wl, wlvif,
3593 bss_conf->ht_operation_mode);
3595 wl1271_warning("Set ht information failed %d", ret);
3604 /* STA/IBSS mode changes */
/*
 * Handle bss_conf changes for a STA or IBSS vif. Large state machine:
 * IBSS join/unjoin, idle handling, CQM RSSI triggers, null-data
 * templates on BSSID change, association/disassociation (rate policies,
 * PS-poll and probe-request templates, connection monitor, keep-alive,
 * ROC/CROC management), a deferred JOIN, HT capabilities/information,
 * and ARP filtering. Statement order matters (e.g. HT and ARP setup are
 * explicitly done after the join); do not reorder.
 * NOTE(review): many error-check/brace lines are elided in this excerpt.
 */
3605 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3606 struct ieee80211_vif *vif,
3607 struct ieee80211_bss_conf *bss_conf,
3610 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3611 bool do_join = false, set_assoc = false;
3612 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3613 bool ibss_joined = false;
3614 u32 sta_rate_set = 0;
3616 struct ieee80211_sta *sta;
3617 bool sta_exists = false;
3618 struct ieee80211_sta_ht_cap sta_ht_cap;
3621 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3627 if (changed & BSS_CHANGED_IBSS) {
3628 if (bss_conf->ibss_joined) {
3629 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3632 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3634 wl1271_unjoin(wl, wlvif);
3638 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3641 /* Need to update the SSID (for filtering etc) */
3642 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3645 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3646 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3647 bss_conf->enable_beacon ? "enabled" : "disabled");
3652 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3653 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3655 wl1271_warning("idle mode change failed %d", ret);
3658 if ((changed & BSS_CHANGED_CQM)) {
3659 bool enable = false;
3660 if (bss_conf->cqm_rssi_thold)
3662 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3663 bss_conf->cqm_rssi_thold,
3664 bss_conf->cqm_rssi_hyst);
3667 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3670 if (changed & BSS_CHANGED_BSSID)
3671 if (!is_zero_ether_addr(bss_conf->bssid)) {
3672 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3676 ret = wl1271_build_qos_null_data(wl, vif);
/* snapshot AP capabilities under RCU before using them below */
3681 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3683 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3687 /* save the supp_rates of the ap */
3688 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3689 if (sta->ht_cap.ht_supported)
3691 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3692 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3693 sta_ht_cap = sta->ht_cap;
3700 if ((changed & BSS_CHANGED_ASSOC)) {
3701 if (bss_conf->assoc) {
3704 wlvif->aid = bss_conf->aid;
3705 wlvif->channel_type = bss_conf->channel_type;
3706 wlvif->beacon_int = bss_conf->beacon_int;
3711 * use basic rates from AP, and determine lowest rate
3712 * to use with control frames.
3714 rates = bss_conf->basic_rates;
3715 wlvif->basic_rate_set =
3716 wl1271_tx_enabled_rates_get(wl, rates,
3719 wl1271_tx_min_rate_get(wl,
3720 wlvif->basic_rate_set);
3723 wl1271_tx_enabled_rates_get(wl,
3726 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3731 * with wl1271, we don't need to update the
3732 * beacon_int and dtim_period, because the firmware
3733 * updates it by itself when the first beacon is
3734 * received after a join.
3736 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3741 * Get a template for hardware connection maintenance
3743 dev_kfree_skb(wlvif->probereq);
3744 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3747 ieoffset = offsetof(struct ieee80211_mgmt,
3748 u.probe_req.variable);
3749 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3751 /* enable the connection monitoring feature */
3752 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3756 /* use defaults when not associated */
3758 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3761 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3765 /* free probe-request template */
3766 dev_kfree_skb(wlvif->probereq);
3767 wlvif->probereq = NULL;
3769 /* revert back to minimum rates for the current band */
3770 wl1271_set_band_rate(wl, wlvif);
3772 wl1271_tx_min_rate_get(wl,
3773 wlvif->basic_rate_set);
3774 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3778 /* disable connection monitor features */
3779 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3781 /* Disable the keep-alive feature */
3782 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3786 /* restore the bssid filter and go to dummy bssid */
3789 * we might have to disable roc, if there was
3790 * no IF_OPER_UP notification.
3793 ret = wl12xx_croc(wl, wlvif->role_id);
3798 * (we also need to disable roc in case of
3799 * roaming on the same channel. until we will
3800 * have a better flow...)
3802 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3803 ret = wl12xx_croc(wl,
3804 wlvif->dev_role_id);
3809 wl1271_unjoin(wl, wlvif);
3810 if (!bss_conf->idle)
3811 wl12xx_start_dev(wl, wlvif);
3816 if (changed & BSS_CHANGED_IBSS) {
3817 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
3818 bss_conf->ibss_joined);
3820 if (bss_conf->ibss_joined) {
3821 u32 rates = bss_conf->basic_rates;
3822 wlvif->basic_rate_set =
3823 wl1271_tx_enabled_rates_get(wl, rates,
3826 wl1271_tx_min_rate_get(wl,
3827 wlvif->basic_rate_set);
3829 /* by default, use 11b + OFDM rates */
3830 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3831 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3837 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
/* deferred JOIN: performed once, after all state above is staged */
3842 ret = wl1271_join(wl, wlvif, set_assoc);
3844 wl1271_warning("cmd join failed %d", ret);
3848 /* ROC until connected (after EAPOL exchange) */
3850 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3854 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
3855 wl12xx_set_authorized(wl, wlvif);
3858 * stop device role if started (we might already be in
3861 if (wl12xx_dev_role_started(wlvif)) {
3862 ret = wl12xx_stop_dev(wl, wlvif);
3868 /* Handle new association with HT. Do this after join. */
3870 if ((changed & BSS_CHANGED_HT) &&
3871 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3872 ret = wl1271_acx_set_ht_capabilities(wl,
3877 wl1271_warning("Set ht cap true failed %d",
3882 /* handle new association without HT and disassociation */
3883 else if (changed & BSS_CHANGED_ASSOC) {
3884 ret = wl1271_acx_set_ht_capabilities(wl,
3889 wl1271_warning("Set ht cap false failed %d",
3896 /* Handle HT information change. Done after join. */
3897 if ((changed & BSS_CHANGED_HT) &&
3898 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3899 ret = wl1271_acx_set_ht_information(wl, wlvif,
3900 bss_conf->ht_operation_mode);
3902 wl1271_warning("Set ht information failed %d", ret);
3907 /* Handle arp filtering. Done after join. */
3908 if ((changed & BSS_CHANGED_ARP_FILTER) ||
3909 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
3910 __be32 addr = bss_conf->arp_addr_list[0];
3911 wlvif->sta.qos = bss_conf->qos;
3912 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
3914 if (bss_conf->arp_addr_cnt == 1 &&
3915 bss_conf->arp_filter_enabled) {
3916 wlvif->ip_addr = addr;
3918 * The template should have been configured only upon
3919 * association. however, it seems that the correct ip
3920 * isn't being set (when sending), so we have to
3921 * reconfigure the template upon every ip change.
3923 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3925 wl1271_warning("build arp rsp failed: %d", ret);
3929 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
3930 (ACX_ARP_FILTER_ARP_FILTERING |
3931 ACX_ARP_FILTER_AUTO_ARP),
3935 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed op: top-level dispatcher that cancels a
 * pending connection-loss work on (re)association, flushes TX before an
 * AP stops beaconing, then delegates to the AP or STA handler under the
 * mutex with the chip awake.
 */
3946 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3947 struct ieee80211_vif *vif,
3948 struct ieee80211_bss_conf *bss_conf,
3951 struct wl1271 *wl = hw->priv;
3952 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3953 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3956 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
3960 * make sure to cancel pending disconnections if our association
/* cancel_delayed_work_sync must run before taking wl->mutex */
3963 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
3964 cancel_delayed_work_sync(&wl->connection_loss_work);
3966 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
3967 !bss_conf->enable_beacon)
3968 wl1271_tx_flush(wl);
3970 mutex_lock(&wl->mutex);
3972 if (unlikely(wl->state == WL1271_STATE_OFF))
3975 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
3978 ret = wl1271_ps_elp_wakeup(wl);
3983 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
3985 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
3987 wl1271_ps_elp_sleep(wl);
3990 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx op: program EDCA AC parameters (cw_min/max, AIFS,
 * TXOP) and the TID configuration for one queue.
 */
3993 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3994 struct ieee80211_vif *vif, u16 queue,
3995 const struct ieee80211_tx_queue_params *params)
3997 struct wl1271 *wl = hw->priv;
3998 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4002 mutex_lock(&wl->mutex);
4004 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4007 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4009 ps_scheme = CONF_PS_SCHEME_LEGACY;
4011 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4014 ret = wl1271_ps_elp_wakeup(wl);
4019 * the txop is confed in units of 32us by the mac80211,
/* << 5 converts mac80211's 32us TXOP units to firmware microseconds */
4022 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4023 params->cw_min, params->cw_max,
4024 params->aifs, params->txop << 5);
4028 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4029 CONF_CHANNEL_TYPE_EDCF,
4030 wl1271_tx_get_queue(queue),
4031 ps_scheme, CONF_ACK_POLICY_LEGACY,
4035 wl1271_ps_elp_sleep(wl);
4038 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf op: read the TSF from the firmware. Returns
 * ULLONG_MAX if the chip is off or the read fails.
 */
4043 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4044 struct ieee80211_vif *vif)
4047 struct wl1271 *wl = hw->priv;
4048 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4049 u64 mactime = ULLONG_MAX;
4052 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4054 mutex_lock(&wl->mutex);
4056 if (unlikely(wl->state == WL1271_STATE_OFF))
4059 ret = wl1271_ps_elp_wakeup(wl);
4063 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4068 wl1271_ps_elp_sleep(wl);
4071 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey op: report only the noise level of the current
 * channel, from the cached wl->noise value.
 */
4075 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4076 struct survey_info *survey)
4078 struct wl1271 *wl = hw->priv;
4079 struct ieee80211_conf *conf = &hw->conf;
4084 survey->channel = conf->channel;
4085 survey->filled = SURVEY_INFO_NOISE_DBM;
4086 survey->noise = wl->noise;
/*
 * Allocate a firmware link (HLID) for a newly added station in AP mode
 * and record its MAC address; bounded by AP_MAX_STATIONS.
 */
4091 static int wl1271_allocate_sta(struct wl1271 *wl,
4092 struct wl12xx_vif *wlvif,
4093 struct ieee80211_sta *sta)
4095 struct wl1271_station *wl_sta;
4099 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4100 wl1271_warning("could not allocate HLID - too much stations");
4104 wl_sta = (struct wl1271_station *)sta->drv_priv;
4105 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4107 wl1271_warning("could not allocate HLID - too many links");
4111 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4112 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4113 wl->active_sta_count++;
/*
 * Release a station's HLID and clear all per-link state (address, BA
 * bitmap, PS maps). Rearms the TX watchdog when the last station goes
 * away so STA-buffered packets still pending in FW don't trip it.
 */
4117 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4119 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4122 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4123 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4124 wl->links[hlid].ba_bitmap = 0;
4125 __clear_bit(hlid, &wl->ap_ps_map);
4126 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4127 wl12xx_free_link(wl, wlvif, &hlid);
4128 wl->active_sta_count--;
4131 * rearm the tx watchdog when the last STA is freed - give the FW a
4132 * chance to return STA-buffered packets before complaining.
4134 if (wl->active_sta_count == 0)
4135 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station (AP mode): allocate an HLID, then register the peer
 * with the firmware; the HLID is freed again if the fw command fails.
 */
4138 static int wl12xx_sta_add(struct wl1271 *wl,
4139 struct wl12xx_vif *wlvif,
4140 struct ieee80211_sta *sta)
4142 struct wl1271_station *wl_sta;
4146 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4148 ret = wl1271_allocate_sta(wl, wlvif, sta);
4152 wl_sta = (struct wl1271_station *)sta->drv_priv;
4153 hlid = wl_sta->hlid;
4155 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation on fw failure */
4157 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove a station (AP mode): tell the firmware to drop the peer, then
 * free its HLID and per-link state.
 */
4162 static int wl12xx_sta_remove(struct wl1271 *wl,
4163 struct wl12xx_vif *wlvif,
4164 struct ieee80211_sta *sta)
4166 struct wl1271_station *wl_sta;
4169 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4171 wl_sta = (struct wl1271_station *)sta->drv_priv;
4173 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4176 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4180 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Map mac80211 station state transitions onto driver/firmware actions:
 * AP mode adds/removes peers and sets peer state + HT caps on
 * authorization; STA mode toggles the AUTHORIZED flag and
 * wl12xx_set_authorized(). Transitions not listed are ignored.
 */
4184 static int wl12xx_update_sta_state(struct wl1271 *wl,
4185 struct wl12xx_vif *wlvif,
4186 struct ieee80211_sta *sta,
4187 enum ieee80211_sta_state old_state,
4188 enum ieee80211_sta_state new_state)
4190 struct wl1271_station *wl_sta;
4192 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4193 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4196 wl_sta = (struct wl1271_station *)sta->drv_priv;
4197 hlid = wl_sta->hlid;
4199 /* Add station (AP mode) */
4201 old_state == IEEE80211_STA_NOTEXIST &&
4202 new_state == IEEE80211_STA_NONE)
4203 return wl12xx_sta_add(wl, wlvif, sta);
4205 /* Remove station (AP mode) */
4207 old_state == IEEE80211_STA_NONE &&
4208 new_state == IEEE80211_STA_NOTEXIST) {
4210 wl12xx_sta_remove(wl, wlvif, sta);
4214 /* Authorize station (AP mode) */
4216 new_state == IEEE80211_STA_AUTHORIZED) {
4217 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4221 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4226 /* Authorize station */
4228 new_state == IEEE80211_STA_AUTHORIZED) {
4229 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4230 return wl12xx_set_authorized(wl, wlvif);
/* de-authorize (STA): drop the AUTHORIZED flag */
4234 old_state == IEEE80211_STA_AUTHORIZED &&
4235 new_state == IEEE80211_STA_ASSOC) {
4236 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
/*
 * mac80211 sta_state op: wrap wl12xx_update_sta_state() with locking and
 * ELP wakeup. For downward transitions errors are not propagated
 * (mac80211 does not allow failing a state decrease).
 */
4243 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4244 struct ieee80211_vif *vif,
4245 struct ieee80211_sta *sta,
4246 enum ieee80211_sta_state old_state,
4247 enum ieee80211_sta_state new_state)
4249 struct wl1271 *wl = hw->priv;
4250 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4253 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4254 sta->aid, old_state, new_state);
4256 mutex_lock(&wl->mutex);
4258 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4263 ret = wl1271_ps_elp_wakeup(wl);
4267 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4269 wl1271_ps_elp_sleep(wl);
4271 mutex_unlock(&wl->mutex);
4272 if (new_state < old_state)
/*
 * mac80211 ampdu_action op: manage RX BlockAck sessions in firmware.
 * RX_START/RX_STOP program the fw receiver session and track it in a
 * per-link (AP) or per-vif (STA) bitmap plus a global session counter.
 * All TX AMPDU actions are intentionally no-ops: the firmware manages
 * the BA initiator side on its own.
 */
4277 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4278 struct ieee80211_vif *vif,
4279 enum ieee80211_ampdu_mlme_action action,
4280 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4283 struct wl1271 *wl = hw->priv;
4284 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4286 u8 hlid, *ba_bitmap;
4288 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4291 /* sanity check - the fields in FW are only 8bits wide */
4292 if (WARN_ON(tid > 0xFF))
4295 mutex_lock(&wl->mutex);
4297 if (unlikely(wl->state == WL1271_STATE_OFF)) {
/* pick the session bitmap: per-vif for STA, per-link for AP */
4302 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4303 hlid = wlvif->sta.hlid;
4304 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4305 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4306 struct wl1271_station *wl_sta;
4308 wl_sta = (struct wl1271_station *)sta->drv_priv;
4309 hlid = wl_sta->hlid;
4310 ba_bitmap = &wl->links[hlid].ba_bitmap;
4316 ret = wl1271_ps_elp_wakeup(wl);
4320 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4324 case IEEE80211_AMPDU_RX_START:
4325 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4330 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4332 wl1271_error("exceeded max RX BA sessions");
4336 if (*ba_bitmap & BIT(tid)) {
4338 wl1271_error("cannot enable RX BA session on active "
4343 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4346 *ba_bitmap |= BIT(tid);
4347 wl->ba_rx_session_count++;
4351 case IEEE80211_AMPDU_RX_STOP:
4352 if (!(*ba_bitmap & BIT(tid))) {
4354 wl1271_error("no active RX BA session on tid: %d",
4359 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4362 *ba_bitmap &= ~BIT(tid);
4363 wl->ba_rx_session_count--;
4368 * The BA initiator session management in FW independently.
4369 * Falling break here on purpose for all TX APDU commands.
4371 case IEEE80211_AMPDU_TX_START:
4372 case IEEE80211_AMPDU_TX_STOP:
4373 case IEEE80211_AMPDU_TX_OPERATIONAL:
4378 wl1271_error("Incorrect ampdu action id=%x\n", action);
4382 wl1271_ps_elp_sleep(wl);
4385 mutex_unlock(&wl->mutex);
/*
 * mac80211 ->set_bitrate_mask callback: cache the per-band enabled-rate
 * masks on the vif, and — for a STA that is not yet associated — push
 * the new basic/band rates to the firmware immediately so scanning and
 * association frames use them.
 */
4390 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4391 struct ieee80211_vif *vif,
4392 const struct cfg80211_bitrate_mask *mask)
4394 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4395 struct wl1271 *wl = hw->priv;
4398 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4399 mask->control[NL80211_BAND_2GHZ].legacy,
4400 mask->control[NL80211_BAND_5GHZ].legacy);
4402 mutex_lock(&wl->mutex);
/* convert the legacy-rate mask of each band to HW rate bits and cache */
4404 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
4405 wlvif->bitrate_masks[i] =
4406 wl1271_tx_enabled_rates_get(wl,
4407 mask->control[i].legacy,
/* chip off: the cached masks will be applied on next init */
4410 if (unlikely(wl->state == WL1271_STATE_OFF))
4413 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4414 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4416 ret = wl1271_ps_elp_wakeup(wl);
4420 wl1271_set_band_rate(wl, wlvif);
4422 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4423 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4425 wl1271_ps_elp_sleep(wl);
4428 mutex_unlock(&wl->mutex);
/*
 * mac80211 ->channel_switch callback: flush pending TX, then send the
 * channel-switch command to the firmware for every STA vif and mark the
 * switch as in progress.  If the chip is off, report the switch as
 * failed to mac80211 for each STA vif instead.
 */
4433 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4434 struct ieee80211_channel_switch *ch_switch)
4436 struct wl1271 *wl = hw->priv;
4437 struct wl12xx_vif *wlvif;
4440 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4442 wl1271_tx_flush(wl);
4444 mutex_lock(&wl->mutex);
4446 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4447 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4448 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* chip is down — tell mac80211 the switch did not happen */
4449 ieee80211_chswitch_done(vif, false);
4454 ret = wl1271_ps_elp_wakeup(wl);
4458 /* TODO: change mac80211 to pass vif as param */
4459 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4460 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4463 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4466 wl1271_ps_elp_sleep(wl);
4469 mutex_unlock(&wl->mutex);
/*
 * mac80211 ->tx_frames_pending callback: report whether any frames are
 * still queued in the driver or in flight in the firmware.
 */
4472 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4474 struct wl1271 *wl = hw->priv;
4477 mutex_lock(&wl->mutex);
4479 if (unlikely(wl->state == WL1271_STATE_OFF))
4482 /* packets are considered pending if in the TX queue or the FW */
4483 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4485 mutex_unlock(&wl->mutex);
4490 /* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table (CCK + OFDM); hw_value is the chip's
 * CONF_HW_BIT_RATE_* bit.  Sampled listing: the .bitrate fields are
 * not visible here. */
4491 static struct ieee80211_rate wl1271_rates[] = {
4493 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4494 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4496 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4497 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4498 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4500 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4501 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4502 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4504 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4505 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4506 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4508 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4509 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4511 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4512 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4514 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4515 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4517 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4518 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4520 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4521 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4523 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4524 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4526 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4527 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4529 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4530 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4533 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14; max_power presumably in dBm — confirm against
 * mac80211's ieee80211_channel definition */
4534 static struct ieee80211_channel wl1271_channels[] = {
4535 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4536 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4537 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4538 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4539 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4540 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4541 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4542 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4543 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4544 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4545 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4546 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4547 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4548 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4551 /* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor tying together the channel and rate tables */
4552 static struct ieee80211_supported_band wl1271_band_2ghz = {
4553 .channels = wl1271_channels,
4554 .n_channels = ARRAY_SIZE(wl1271_channels),
4555 .bitrates = wl1271_rates,
4556 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4559 /* 5 GHz data rates for WL1273 */
/* OFDM-only (no CCK on 5 GHz); .bitrate fields not visible in this
 * sampled listing */
4560 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4562 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4563 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4565 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4566 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4568 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4569 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4571 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4572 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4574 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4575 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4577 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4578 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4580 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4581 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4583 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4584 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4587 /* 5 GHz band channels for WL1273 */
4588 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4589 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4590 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4591 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4592 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4593 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4594 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4595 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4596 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4597 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4598 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4599 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4600 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4601 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4602 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4603 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4604 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4605 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4606 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4607 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4608 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4609 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4610 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4611 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4612 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4613 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4614 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4615 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4616 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4617 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4618 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4619 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4620 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4621 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4622 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
/* 5 GHz band descriptor tying together the channel and rate tables */
4625 static struct ieee80211_supported_band wl1271_band_5ghz = {
4626 .channels = wl1271_channels_5ghz,
4627 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4628 .bitrates = wl1271_rates_5ghz,
4629 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 driver operations table for this device */
4632 static const struct ieee80211_ops wl1271_ops = {
4633 .start = wl1271_op_start,
4634 .stop = wl1271_op_stop,
4635 .add_interface = wl1271_op_add_interface,
4636 .remove_interface = wl1271_op_remove_interface,
4637 .change_interface = wl12xx_op_change_interface,
4639 .suspend = wl1271_op_suspend,
4640 .resume = wl1271_op_resume,
4642 .config = wl1271_op_config,
4643 .prepare_multicast = wl1271_op_prepare_multicast,
4644 .configure_filter = wl1271_op_configure_filter,
4646 .set_key = wl1271_op_set_key,
4647 .hw_scan = wl1271_op_hw_scan,
4648 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4649 .sched_scan_start = wl1271_op_sched_scan_start,
4650 .sched_scan_stop = wl1271_op_sched_scan_stop,
4651 .bss_info_changed = wl1271_op_bss_info_changed,
4652 .set_frag_threshold = wl1271_op_set_frag_threshold,
4653 .set_rts_threshold = wl1271_op_set_rts_threshold,
4654 .conf_tx = wl1271_op_conf_tx,
4655 .get_tsf = wl1271_op_get_tsf,
4656 .get_survey = wl1271_op_get_survey,
4657 .sta_state = wl12xx_op_sta_state,
4658 .ampdu_action = wl1271_op_ampdu_action,
4659 .tx_frames_pending = wl1271_tx_frames_pending,
4660 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4661 .channel_switch = wl12xx_op_channel_switch,
4662 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a hardware RX rate code to a mac80211 rate-table index for
 * the given band, validating it against the chip's rate table size and
 * flagging codes the table marks as unsupported.
 */
4666 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4672 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4673 wl1271_error("Illegal RX rate from HW: %d", rate);
4677 idx = wl->band_rate_to_idx[band][rate];
4678 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4679 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs "bt_coex_state" show handler: print the current BT-coexistence
 * (soft-gemini) state plus a short legend.
 */
4686 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4687 struct device_attribute *attr,
4690 struct wl1271 *wl = dev_get_drvdata(dev);
4695 mutex_lock(&wl->mutex);
4696 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4698 mutex_unlock(&wl->mutex);
/*
 * sysfs "bt_coex_state" store handler: parse a decimal value, update
 * wl->sg_enabled, and — if the chip is up — push the new soft-gemini
 * setting to the firmware (with ELP wakeup around the ACX call).
 */
4704 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4705 struct device_attribute *attr,
4706 const char *buf, size_t count)
4708 struct wl1271 *wl = dev_get_drvdata(dev);
4712 ret = kstrtoul(buf, 10, &res);
4714 wl1271_warning("incorrect value written to bt_coex_mode");
4718 mutex_lock(&wl->mutex);
/* no-op if the value did not change */
4722 if (res == wl->sg_enabled)
4725 wl->sg_enabled = res;
/* chip off: remember the setting, apply it on next firmware boot */
4727 if (wl->state == WL1271_STATE_OFF)
4730 ret = wl1271_ps_elp_wakeup(wl);
4734 wl1271_acx_sg_enable(wl, wl->sg_enabled);
4735 wl1271_ps_elp_sleep(wl);
4738 mutex_unlock(&wl->mutex);
/* user-readable, root-writable sysfs attribute for BT coex control */
4742 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4743 wl1271_sysfs_show_bt_coex_state,
4744 wl1271_sysfs_store_bt_coex_state);
/*
 * sysfs "hw_pg_ver" show handler: print the hardware PG (process
 * generation) version, or "n/a" if it was never read from the chip.
 */
4746 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4747 struct device_attribute *attr,
4750 struct wl1271 *wl = dev_get_drvdata(dev);
4755 mutex_lock(&wl->mutex);
4756 if (wl->hw_pg_ver >= 0)
4757 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4759 len = snprintf(buf, len, "n/a\n");
4760 mutex_unlock(&wl->mutex);
/* read-only sysfs attribute exposing the HW PG version */
4765 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4766 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary "fwlog" read handler: block (interruptibly) until
 * firmware log data is available, then copy up to @count bytes out of
 * wl->fwlog and compact the remaining data to the front of the buffer.
 * Only one reader proceeds at a time (exclusive wait); seeking is not
 * supported because consumed log data is discarded.
 */
4768 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4769 struct bin_attribute *bin_attr,
4770 char *buffer, loff_t pos, size_t count)
4772 struct device *dev = container_of(kobj, struct device, kobj);
4773 struct wl1271 *wl = dev_get_drvdata(dev);
4777 ret = mutex_lock_interruptible(&wl->mutex);
4779 return -ERESTARTSYS;
4781 /* Let only one thread read the log at a time, blocking others */
4782 while (wl->fwlog_size == 0) {
4785 prepare_to_wait_exclusive(&wl->fwlog_waitq,
4787 TASK_INTERRUPTIBLE);
/* data arrived between the check and the wait registration */
4789 if (wl->fwlog_size != 0) {
4790 finish_wait(&wl->fwlog_waitq, &wait);
/* drop the mutex before sleeping so the producer can make progress */
4794 mutex_unlock(&wl->mutex);
4797 finish_wait(&wl->fwlog_waitq, &wait);
4799 if (signal_pending(current))
4800 return -ERESTARTSYS;
4802 ret = mutex_lock_interruptible(&wl->mutex);
4804 return -ERESTARTSYS;
4807 /* Check if the fwlog is still valid */
4808 if (wl->fwlog_size < 0) {
4809 mutex_unlock(&wl->mutex);
4813 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4814 len = min(count, (size_t)wl->fwlog_size);
4815 wl->fwlog_size -= len;
4816 memcpy(buffer, wl->fwlog, len);
4818 /* Make room for new messages */
4819 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
4821 mutex_unlock(&wl->mutex);
/* root-readable sysfs binary file exposing the firmware log stream */
4826 static struct bin_attribute fwlog_attr = {
4827 .attr = {.name = "fwlog", .mode = S_IRUSR},
4828 .read = wl1271_sysfs_read_fwlog,
/*
 * Delayed work fired when the connection to the AP is deemed lost:
 * notify mac80211 of connection loss for every associated STA vif.
 */
4831 static void wl1271_connection_loss_work(struct work_struct *work)
4833 struct delayed_work *dwork;
4835 struct ieee80211_vif *vif;
4836 struct wl12xx_vif *wlvif;
4838 dwork = container_of(work, struct delayed_work, work);
4839 wl = container_of(dwork, struct wl1271, connection_loss_work);
4841 wl1271_info("Connection loss work.");
4843 mutex_lock(&wl->mutex);
4845 if (unlikely(wl->state == WL1271_STATE_OFF))
4848 /* Call mac80211 connection loss */
4849 wl12xx_for_each_wlvif_sta(wl, wlvif) {
/* only associated STA vifs can lose a connection */
4850 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
4852 vif = wl12xx_wlvif_to_vif(wlvif);
4853 ieee80211_connection_loss(vif);
4856 mutex_unlock(&wl->mutex);
/*
 * Build @n consecutive MAC addresses from a 24-bit OUI and a 24-bit NIC
 * base, store them in wl->addresses, and register them with the wiphy.
 * Warns (but does not correct) if the NIC part would wrap past 0xffffff.
 */
4859 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
4860 u32 oui, u32 nic, int n)
4864 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
4867 if (nic + n - 1 > 0xffffff)
4868 wl1271_warning("NIC part of the MAC address wraps around!");
4870 for (i = 0; i < n; i++) {
/* bytes 0-2: OUI (big-endian), bytes 3-5: NIC part */
4871 wl->addresses[i].addr[0] = (u8)(oui >> 16);
4872 wl->addresses[i].addr[1] = (u8)(oui >> 8);
4873 wl->addresses[i].addr[2] = (u8) oui;
4874 wl->addresses[i].addr[3] = (u8)(nic >> 16);
4875 wl->addresses[i].addr[4] = (u8)(nic >> 8);
4876 wl->addresses[i].addr[5] = (u8) nic;
4880 wl->hw->wiphy->n_addresses = n;
4881 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Briefly power the chip on to read its identity: chip ID register,
 * PG version, and (if the chip-specific op exists) the fused MAC
 * address.  Powers the chip back off before returning.
 */
4884 static int wl12xx_get_hw_info(struct wl1271 *wl)
4888 ret = wl12xx_set_power_on(wl);
4892 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
/* defaults; overwritten by ops->get_mac() when available */
4894 wl->fuse_oui_addr = 0;
4895 wl->fuse_nic_addr = 0;
4897 wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
4899 if (wl->ops->get_mac)
4900 wl->ops->get_mac(wl);
4902 wl1271_power_off(wl);
/*
 * Register the device with mac80211 (once).  The MAC address comes from
 * the NVS file when present; if the NVS address is all-zero it falls
 * back to the address fused into the chip (BD_ADDR + 1 for WLAN).
 * Also initializes debugfs on success.
 */
4907 static int wl1271_register_hw(struct wl1271 *wl)
4910 u32 oui_addr = 0, nic_addr = 0;
4912 if (wl->mac80211_registered)
4915 ret = wl1271_fetch_nvs(wl);
4917 /* NOTE: The wl->nvs->nvs element must be first, in
4918 * order to simplify the casting, we assume it is at
4919 * the beginning of the wl->nvs structure.
4921 u8 *nvs_ptr = (u8 *)wl->nvs;
/* MAC address bytes are scattered in the NVS layout — offsets below
 * are dictated by the NVS file format */
4924 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
4926 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
4929 /* if the MAC address is zeroed in the NVS derive from fuse */
4930 if (oui_addr == 0 && nic_addr == 0) {
4931 oui_addr = wl->fuse_oui_addr;
4932 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
4933 nic_addr = wl->fuse_nic_addr + 1;
4936 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
4938 ret = ieee80211_register_hw(wl->hw);
4940 wl1271_error("unable to register mac80211 hw: %d", ret);
4944 wl->mac80211_registered = true;
4946 wl1271_debugfs_init(wl);
4948 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop PLT mode (condition not visible in
 * this sampled listing) and unregister from mac80211.
 */
4954 static void wl1271_unregister_hw(struct wl1271 *wl)
4957 wl1271_plt_stop(wl);
4959 ieee80211_unregister_hw(wl->hw);
4960 wl->mac80211_registered = false;
/*
 * One-time setup of the ieee80211_hw/wiphy structures before mac80211
 * registration: headroom, HW capability flags, supported interface
 * modes, scan limits, cipher suites, band tables (device-local copies),
 * WoWLAN/probe-response offload flags and per-vif/sta private sizes.
 */
4964 static int wl1271_init_ieee80211(struct wl1271 *wl)
4966 static const u32 cipher_suites[] = {
4967 WLAN_CIPHER_SUITE_WEP40,
4968 WLAN_CIPHER_SUITE_WEP104,
4969 WLAN_CIPHER_SUITE_TKIP,
4970 WLAN_CIPHER_SUITE_CCMP,
4971 WL1271_CIPHER_SUITE_GEM,
4974 /* The tx descriptor buffer */
4975 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra space for the TKIP fields in the header */
4977 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
4978 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
4981 /* FIXME: find a proper value */
4982 wl->hw->channel_change_time = 10000;
4983 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
4985 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
4986 IEEE80211_HW_SUPPORTS_PS |
4987 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
4988 IEEE80211_HW_SUPPORTS_UAPSD |
4989 IEEE80211_HW_HAS_RATE_CONTROL |
4990 IEEE80211_HW_CONNECTION_MONITOR |
4991 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4992 IEEE80211_HW_SPECTRUM_MGMT |
4993 IEEE80211_HW_AP_LINK_PS |
4994 IEEE80211_HW_AMPDU_AGGREGATION |
4995 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
4996 IEEE80211_HW_SCAN_WHILE_IDLE;
4998 wl->hw->wiphy->cipher_suites = cipher_suites;
4999 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5001 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5002 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5003 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5004 wl->hw->wiphy->max_scan_ssids = 1;
5005 wl->hw->wiphy->max_sched_scan_ssids = 16;
5006 wl->hw->wiphy->max_match_sets = 16;
5008 * Maximum length of elements in scanning probe request templates
5009 * should be the maximum length possible for a template, without
5010 * the IEEE80211 header of the template
5012 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5013 sizeof(struct ieee80211_header);
5015 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5016 sizeof(struct ieee80211_header);
5018 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5019 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5021 /* make sure all our channels fit in the scanned_ch bitmask */
5022 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5023 ARRAY_SIZE(wl1271_channels_5ghz) >
5024 WL1271_MAX_CHANNELS);
5026 * We keep local copies of the band structs because we need to
5027 * modify them on a per-device basis.
5029 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5030 sizeof(wl1271_band_2ghz));
5031 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5032 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5033 sizeof(*wl->ht_cap));
5034 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5035 sizeof(wl1271_band_5ghz));
5036 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5037 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5038 sizeof(*wl->ht_cap));
5040 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5041 &wl->bands[IEEE80211_BAND_2GHZ];
5042 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5043 &wl->bands[IEEE80211_BAND_5GHZ];
5046 wl->hw->max_rates = 1;
5048 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5050 /* the FW answers probe-requests in AP-mode */
5051 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5052 wl->hw->wiphy->probe_resp_offload =
5053 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5054 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5055 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5057 SET_IEEE80211_DEV(wl->hw, wl->dev);
5059 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5060 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5062 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5067 #define WL1271_DEFAULT_CHANNEL 0
/*
 * Allocate and initialize the core driver state: the ieee80211_hw, the
 * chip-specific private area (@priv_size), queues, work items, the
 * freezable workqueue, the TX aggregation buffer, the dummy packet,
 * the FW log page and the mailbox.  Returns the hw on success or an
 * ERR_PTR; the error unwinding labels free resources in reverse order
 * of acquisition (goto-cleanup pattern).
 */
5069 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5071 struct ieee80211_hw *hw;
5076 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5078 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5080 wl1271_error("could not alloc ieee80211_hw");
5086 memset(wl, 0, sizeof(*wl));
5088 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5090 wl1271_error("could not alloc wl priv");
5092 goto err_priv_alloc;
5095 INIT_LIST_HEAD(&wl->wlvif_list);
/* one TX queue per AC for every possible link */
5099 for (i = 0; i < NUM_TX_QUEUES; i++)
5100 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5101 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5103 skb_queue_head_init(&wl->deferred_rx_queue);
5104 skb_queue_head_init(&wl->deferred_tx_queue);
5106 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5107 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5108 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5109 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5110 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5111 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5112 INIT_DELAYED_WORK(&wl->connection_loss_work,
5113 wl1271_connection_loss_work);
/* freezable so driver work is quiesced across system suspend */
5115 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5116 if (!wl->freezable_wq) {
5121 wl->channel = WL1271_DEFAULT_CHANNEL;
5123 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5124 wl->band = IEEE80211_BAND_2GHZ;
5125 wl->channel_type = NL80211_CHAN_NO_HT;
5127 wl->sg_enabled = true;
5130 wl->ap_fw_ps_map = 0;
5132 wl->platform_quirks = 0;
5133 wl->sched_scanning = false;
5134 wl->system_hlid = WL12XX_SYSTEM_HLID;
5135 wl->active_sta_count = 0;
5137 init_waitqueue_head(&wl->fwlog_waitq);
5139 /* The system link is always allocated */
5140 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5142 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5143 for (i = 0; i < wl->num_tx_desc; i++)
5144 wl->tx_frames[i] = NULL;
5146 spin_lock_init(&wl->wl_lock);
5148 wl->state = WL1271_STATE_OFF;
5149 wl->fw_type = WL12XX_FW_TYPE_NONE;
5150 mutex_init(&wl->mutex);
/* page-aligned buffer used to aggregate SDIO/SPI transfers */
5152 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5153 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5154 if (!wl->aggr_buf) {
5159 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5160 if (!wl->dummy_packet) {
5165 /* Allocate one page for the FW log */
5166 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5169 goto err_dummy_packet;
/* GFP_DMA: the mailbox is a target of device DMA transfers */
5172 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
5181 free_page((unsigned long)wl->fwlog);
5184 dev_kfree_skb(wl->dummy_packet);
5187 free_pages((unsigned long)wl->aggr_buf, order);
5190 destroy_workqueue(wl->freezable_wq);
5193 wl1271_debugfs_exit(wl);
5197 ieee80211_free_hw(hw);
5201 return ERR_PTR(ret);
5203 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Tear down everything wlcore_alloc_hw() (and later init) created:
 * wake any blocked fwlog readers (fwlog_size = -1 marks the log
 * invalid), remove the sysfs files, free the buffers, debugfs state,
 * firmware status/TX-result areas, the workqueue and finally the hw.
 */
5205 int wlcore_free_hw(struct wl1271 *wl)
5207 /* Unblock any fwlog readers */
5208 mutex_lock(&wl->mutex);
5209 wl->fwlog_size = -1;
5210 wake_up_interruptible_all(&wl->fwlog_waitq);
5211 mutex_unlock(&wl->mutex);
5213 device_remove_bin_file(wl->dev, &fwlog_attr);
5215 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5217 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5218 free_page((unsigned long)wl->fwlog);
5219 dev_kfree_skb(wl->dummy_packet);
5220 free_pages((unsigned long)wl->aggr_buf,
5221 get_order(WL1271_AGGR_BUFFER_SIZE));
5223 wl1271_debugfs_exit(wl);
5227 wl->fw_type = WL12XX_FW_TYPE_NONE;
5231 kfree(wl->fw_status_1);
5232 kfree(wl->tx_res_if);
5233 destroy_workqueue(wl->freezable_wq);
5236 ieee80211_free_hw(wl->hw);
5240 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Hard IRQ handler (top half): mark the IRQ as running, complete any
 * pending ELP wakeup, and normally ask for the threaded handler
 * (IRQ_WAKE_THREAD).  While suspended, the work is instead recorded as
 * pending, the IRQ line is disabled and a PM wakeup event is signalled
 * so the interrupt is serviced after resume.
 */
5242 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5244 struct wl1271 *wl = cookie;
5245 unsigned long flags;
5247 wl1271_debug(DEBUG_IRQ, "IRQ");
5249 /* complete the ELP completion */
5250 spin_lock_irqsave(&wl->wl_lock, flags);
5251 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5252 if (wl->elp_compl) {
5253 complete(wl->elp_compl);
5254 wl->elp_compl = NULL;
5257 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5258 /* don't enqueue a work right now. mark it as pending */
5259 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5260 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5261 disable_irq_nosync(wl->irq);
5262 pm_wakeup_event(wl->dev, 0);
5263 spin_unlock_irqrestore(&wl->wl_lock, flags);
5266 spin_unlock_irqrestore(&wl->wl_lock, flags);
5268 return IRQ_WAKE_THREAD;
/*
 * Platform probe: wire up platform data (IRQ, quirks, power callback,
 * bus ops), request the threaded IRQ (edge- or level-triggered per
 * quirk), set up IRQ wake / WoWLAN support, read the chip identity,
 * register with mac80211 and create the sysfs files.  Error labels
 * unwind in reverse order.
 */
5271 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5273 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5274 unsigned long irqflags;
/* the chip-specific driver must have filled in its ops tables */
5277 if (!wl->ops || !wl->ptable) {
5282 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5284 /* adjust some runtime configuration parameters */
5285 wlcore_adjust_conf(wl);
5287 wl->irq = platform_get_irq(pdev, 0);
5288 wl->platform_quirks = pdata->platform_quirks;
5289 wl->set_power = pdata->set_power;
5290 wl->dev = &pdev->dev;
5291 wl->if_ops = pdata->ops;
5293 platform_set_drvdata(pdev, wl);
5295 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5296 irqflags = IRQF_TRIGGER_RISING;
5298 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5300 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
5304 wl1271_error("request_irq() failed: %d", ret);
5308 ret = enable_irq_wake(wl->irq);
5310 wl->irq_wake_enabled = true;
5311 device_init_wakeup(wl->dev, 1);
/* advertise WoWLAN patterns only if power stays on in suspend */
5312 if (pdata->pwr_in_suspend) {
5313 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5314 wl->hw->wiphy->wowlan.n_patterns =
5315 WL1271_MAX_RX_FILTERS;
5316 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5317 wl->hw->wiphy->wowlan.pattern_max_len =
5318 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* keep the IRQ off until the interface is started */
5321 disable_irq(wl->irq);
5323 ret = wl12xx_get_hw_info(wl);
5325 wl1271_error("couldn't get hw info");
5329 ret = wl->ops->identify_chip(wl);
5333 ret = wl1271_init_ieee80211(wl);
5337 ret = wl1271_register_hw(wl);
5341 /* Create sysfs file to control bt coex state */
5342 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5344 wl1271_error("failed to create sysfs file bt_coex_state");
5348 /* Create sysfs file to get HW PG version */
5349 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5351 wl1271_error("failed to create sysfs file hw_pg_ver");
5352 goto out_bt_coex_state;
5355 /* Create sysfs file for the FW log */
5356 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5358 wl1271_error("failed to create sysfs file fwlog");
5365 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5368 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5371 free_irq(wl->irq, wl);
5379 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Platform remove: disable IRQ wake / device wakeup if it was enabled,
 * unregister from mac80211 and release the interrupt line.
 */
5381 int __devexit wlcore_remove(struct platform_device *pdev)
5383 struct wl1271 *wl = platform_get_drvdata(pdev);
5385 if (wl->irq_wake_enabled) {
5386 device_init_wakeup(wl->dev, 0);
5387 disable_irq_wake(wl->irq);
5389 wl1271_unregister_hw(wl);
5390 free_irq(wl->irq, wl);
5395 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and metadata.  debug_level is exported so the
 * chip-specific modules share the same debug mask. */
5397 u32 wl12xx_debug_level = DEBUG_NONE;
5398 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5399 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5400 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5402 module_param_named(fwlog, fwlog_param, charp, 0);
5403 MODULE_PARM_DESC(fwlog,
5404 "FW logger options: continuous, ondemand, dbgpins or disable");
5406 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5407 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5409 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5410 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5412 MODULE_LICENSE("GPL");
5413 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5414 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");